diff --git a/clang/include/clang/Basic/CodeGenOptions.def b/clang/include/clang/Basic/CodeGenOptions.def
--- a/clang/include/clang/Basic/CodeGenOptions.def
+++ b/clang/include/clang/Basic/CodeGenOptions.def
@@ -65,6 +65,7 @@
 CODEGENOPT(ExperimentalStrictFloatingPoint, 1, 0) ///< Enables the new, experimental
                                                   ///< strict floating point.
 CODEGENOPT(EnableNoundefAttrs, 1, 0) ///< Enable emitting `noundef` attributes on IR call arguments and return values
+CODEGENOPT(EnableNoundefLoadAttr, 1, 0) ///< Enable emitting `noundef` attributes on IR loads.
 CODEGENOPT(DebugPassManager, 1, 0) ///< Prints debug information for the new
                                    ///< pass manager.
 CODEGENOPT(DisableRedZone  , 1, 0) ///< Set when -mno-red-zone is enabled.
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -5762,6 +5762,13 @@
   PosFlag<SetTrue, [], "Enable">,
   NegFlag<SetFalse, [], "Disable">,
   BothFlags<[], " analyzing function argument and return types for mandatory definedness">>;
+defm enable_noundef_load_analysis : BoolOption<"",
+  "enable-noundef-load-analysis",
+  CodeGenOpts<"EnableNoundefLoadAttr">,
+  DefaultTrue,
+  PosFlag<SetTrue, [], "Enable">,
+  NegFlag<SetFalse, [], "Disable">,
+  BothFlags<[], " analyzing memory loads for definedness">>;
 defm opaque_pointers : BoolOption<"", "opaque-pointers",
   CodeGenOpts<"OpaquePointers">,
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -672,6 +672,22 @@
          SanOpts.has(SanitizerKind::Vptr);
 }
 
+namespace {
+void applyNoundefToLoadInst(bool enable, const clang::QualType &Ty,
+                            llvm::LoadInst *Load) {
+  if (enable) {
+    if (auto TyPtr = Ty.getTypePtrOrNull()) {
+      if (!(TyPtr->isSpecificBuiltinType(BuiltinType::UChar) ||
+            TyPtr->isSpecificBuiltinType(BuiltinType::Char_U) ||
+            TyPtr->isStdByteType())) {
+        Load->setMetadata(llvm::LLVMContext::MD_noundef,
+                          llvm::MDNode::get(Load->getContext(), None));
+      }
+    }
+  }
+}
+} // namespace
+
 void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                     llvm::Value *Ptr, QualType Ty,
                                     CharUnits Alignment,
@@ -1722,7 +1738,6 @@
       Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
       // Now load value.
       llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
-      // Shuffle vector to get vec3.
       V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
       return EmitFromMemory(V, Ty);
@@ -1743,6 +1758,8 @@
     Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
   }
 
+  applyNoundefToLoadInst(CGM.getCodeGenOpts().EnableNoundefLoadAttr, Ty, Load);
+
   CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
 
   if (EmitScalarRangeCheck(Load, Ty, Loc)) {
diff --git a/clang/test/CodeGen/X86/avx-builtins.c b/clang/test/CodeGen/X86/avx-builtins.c
--- a/clang/test/CodeGen/X86/avx-builtins.c
+++ b/clang/test/CodeGen/X86/avx-builtins.c
@@ -85,14 +85,14 @@
 __m256d test_mm256_broadcast_pd(__m128d* A) {
   // CHECK-LABEL: test_mm256_broadcast_pd
-  // CHECK: load <2 x double>, ptr %{{.*}}, align 1{{$}}
+  // CHECK: load <2 x double>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF:![0-9]+]]{{$}}
   // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32>
   return _mm256_broadcast_pd(A);
 }
 
 __m256 test_mm256_broadcast_ps(__m128* A) {
   // CHECK-LABEL: test_mm256_broadcast_ps
-  // CHECK: load <4 x float>, ptr %{{.*}}, align 1{{$}}
+  // CHECK: load <4 x float>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}}
   // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32>
   return _mm256_broadcast_ps(A);
 }
@@ -1210,42 +1210,42 @@
 __m256d test_mm256_loadu_pd(double* A) {
   // CHECK-LABEL: test_mm256_loadu_pd
-  // CHECK: load <4 x double>, ptr %{{.*}}, align 1{{$}}
+  // CHECK: load <4 x double>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}}
   return _mm256_loadu_pd(A);
 }
 
 __m256 test_mm256_loadu_ps(float* A) {
   // CHECK-LABEL: test_mm256_loadu_ps
-  // CHECK: load <8 x float>, ptr %{{.*}}, align 1{{$}}
+  // CHECK: load <8 x float>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}}
   return _mm256_loadu_ps(A);
 }
 
 __m256i test_mm256_loadu_si256(__m256i* A) {
   // CHECK-LABEL: test_mm256_loadu_si256
-  // CHECK: load <4 x i64>, ptr %{{.+}}, align 1{{$}}
+  // CHECK: load <4 x i64>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}}
   return _mm256_loadu_si256(A);
 }
 
 __m256 test_mm256_loadu2_m128(float* A, float* B) {
   // CHECK-LABEL: test_mm256_loadu2_m128
-  // CHECK: load <4 x float>, ptr %{{.*}}, align 1{{$}}
-  // CHECK: load <4 x float>, ptr %{{.*}}, align 1{{$}}
+  // CHECK: load <4 x float>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}}
+  // CHECK: load <4 x float>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}}
   // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <8 x i32>
   return _mm256_loadu2_m128(A, B);
 }
 
 __m256d test_mm256_loadu2_m128d(double* A, double* B) {
   // CHECK-LABEL: test_mm256_loadu2_m128d
-  // CHECK: load <2 x double>, ptr %{{.*}}, align 1{{$}}
-  // CHECK: load <2 x double>, ptr %{{.*}}, align 1{{$}}
+  // CHECK: load <2 x double>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}}
+  // CHECK: load <2 x double>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}}
   // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <4 x i32>
   return _mm256_loadu2_m128d(A, B);
 }
 
 __m256i test_mm256_loadu2_m128i(__m128i* A, __m128i* B) {
   // CHECK-LABEL: test_mm256_loadu2_m128i
-  // CHECK: load <2 x i64>, ptr %{{.*}}, align 1{{$}}
-  // CHECK: load <2 x i64>, ptr %{{.*}}, align 1{{$}}
+  // CHECK: load <2 x i64>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}}
+  // CHECK: load <2 x i64>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}}
   // CHECK: shufflevector <2 x i64> %{{.*}}, <2 x i64> %{{.*}}, <4 x i32>
   return _mm256_loadu2_m128i(A, B);
 }
@@ -2167,3 +2167,5 @@
   // CHECK: extractelement <8 x float> %{{.*}}, i32 0
   return _mm256_cvtss_f32(__a);
 }
+
+// CHECK: [[NOUNDEF]] = !{}
diff --git
a/clang/test/CodeGen/X86/avx512bw-builtins.c b/clang/test/CodeGen/X86/avx512bw-builtins.c --- a/clang/test/CodeGen/X86/avx512bw-builtins.c +++ b/clang/test/CodeGen/X86/avx512bw-builtins.c @@ -2048,7 +2048,7 @@ __m512i test_mm512_loadu_epi16 (void *__P) { // CHECK-LABEL: @test_mm512_loadu_epi16 - // CHECK: load <8 x i64>, ptr %{{.*}}, align 1{{$}} + // CHECK: load <8 x i64>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF:![0-9]+]]{{$}} return _mm512_loadu_epi16 (__P); } @@ -2067,7 +2067,7 @@ __m512i test_mm512_loadu_epi8 (void *__P) { // CHECK-LABEL: @test_mm512_loadu_epi8 - // CHECK: load <8 x i64>, ptr %{{.*}}, align 1{{$}} + // CHECK: load <8 x i64>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm512_loadu_epi8 (__P); } @@ -2394,3 +2394,5 @@ // CHECK: @llvm.x86.avx512.mask.pmovus.wb.mem.512 _mm512_mask_cvtusepi16_storeu_epi8 ( __P, __M, __A); } + +// CHECK: [[NOUNDEF]] = !{} diff --git a/clang/test/CodeGen/X86/avx512f-builtins.c b/clang/test/CodeGen/X86/avx512f-builtins.c --- a/clang/test/CodeGen/X86/avx512f-builtins.c +++ b/clang/test/CodeGen/X86/avx512f-builtins.c @@ -282,14 +282,14 @@ __m512i test_mm512_loadu_si512 (void *__P) { // CHECK-LABEL: @test_mm512_loadu_si512 - // CHECK: load <8 x i64>, <8 x i64>* %{{.*}}, align 1{{$}} + // CHECK: load <8 x i64>, <8 x i64>* %{{.*}}, align 1, !noundef [[NOUNDEF:![0-9]+]]{{$}} return _mm512_loadu_si512 ( __P); } __m512i test_mm512_loadu_epi32 (void *__P) { // CHECK-LABEL: @test_mm512_loadu_epi32 - // CHECK: load <8 x i64>, <8 x i64>* %{{.*}}, align 1{{$}} + // CHECK: load <8 x i64>, <8 x i64>* %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm512_loadu_epi32 (__P); } @@ -310,7 +310,7 @@ __m512i test_mm512_loadu_epi64 (void *__P) { // CHECK-LABEL: @test_mm512_loadu_epi64 - // CHECK: load <8 x i64>, <8 x i64>* %{{.*}}, align 1{{$}} + // CHECK: load <8 x i64>, <8 x i64>* %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm512_loadu_epi64 (__P); } @@ -331,7 +331,7 @@ __m512 test_mm512_loadu_ps(void *p) { // CHECK-LABEL: @test_mm512_loadu_ps - // CHECK: load <16 x float>, <16 x float>* {{.*}}, align 1{{$}} + // CHECK: load <16 x float>, <16 x float>* {{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm512_loadu_ps(p); } @@ -345,7 +345,7 @@ __m512d test_mm512_loadu_pd(void *p) { // CHECK-LABEL: @test_mm512_loadu_pd - // CHECK: load <8 x double>, <8 x double>* {{.*}}, align 1{{$}} + // CHECK: load <8 x double>, <8 x double>* {{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm512_loadu_pd(p); } @@ -10867,3 +10867,5 @@ // CHECK: @llvm.x86.avx512.mask.scatter.dpq.512 _mm512_mask_i32loscatter_epi64(__addr, __mask, __index, __v1, 2); } + +// CHECK: [[NOUNDEF]] = !{} diff --git a/clang/test/CodeGen/X86/avx512fp16-builtins.c b/clang/test/CodeGen/X86/avx512fp16-builtins.c --- a/clang/test/CodeGen/X86/avx512fp16-builtins.c +++ b/clang/test/CodeGen/X86/avx512fp16-builtins.c @@ -1455,7 +1455,7 @@ __m128h test_mm_load_sh(void const *A) { // CHECK-LABEL: test_mm_load_sh - // CHECK: load half, half* %{{.*}}, align 1{{$}} + // CHECK: load half, half* %{{.*}}, align 1, !noundef [[NOUNDEF:![0-9]+]]{{$}} return _mm_load_sh(A); } @@ -1491,19 +1491,19 @@ __m512h test_mm512_loadu_ph(void *p) { // CHECK-LABEL: @test_mm512_loadu_ph - // CHECK: load <32 x half>, <32 x half>* {{.*}}, align 1{{$}} + // CHECK: load <32 x half>, <32 x half>* {{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm512_loadu_ph(p); } __m256h test_mm256_loadu_ph(void *p) { // CHECK-LABEL: @test_mm256_loadu_ph - // CHECK: load <16 x half>, <16 x half>* {{.*}}, align 1{{$}} + // CHECK: 
load <16 x half>, <16 x half>* {{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm256_loadu_ph(p); } __m128h test_mm_loadu_ph(void *p) { // CHECK-LABEL: @test_mm_loadu_ph - // CHECK: load <8 x half>, <8 x half>* {{.*}}, align 1{{$}} + // CHECK: load <8 x half>, <8 x half>* {{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm_loadu_ph(p); } @@ -4626,3 +4626,5 @@ // CHECK: @llvm.x86.avx512fp16.mask.vfcmul.csh return _mm_maskz_cmul_round_sch(__U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); } + +// CHECK: [[NOUNDEF]] = !{} diff --git a/clang/test/CodeGen/X86/avx512vl-builtins.c b/clang/test/CodeGen/X86/avx512vl-builtins.c --- a/clang/test/CodeGen/X86/avx512vl-builtins.c +++ b/clang/test/CodeGen/X86/avx512vl-builtins.c @@ -6878,7 +6878,7 @@ __m128i test_mm_loadu_epi64(void const *__P) { // CHECK-LABEL: @test_mm_loadu_epi64 - // CHECK: load <2 x i64>, ptr %{{.*}}, align 1{{$}} + // CHECK: load <2 x i64>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF:![0-9]+]]{{$}} return _mm_loadu_epi64(__P); } @@ -6896,7 +6896,7 @@ __m256i test_mm256_loadu_epi64(void const *__P) { // CHECK-LABEL: @test_mm256_loadu_epi64 - // CHECK: load <4 x i64>, ptr %{{.*}}, align 1{{$}} + // CHECK: load <4 x i64>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm256_loadu_epi64(__P); } @@ -6914,7 +6914,7 @@ __m128i test_mm_loadu_epi32(void const *__P) { // CHECK-LABEL: @test_mm_loadu_epi32 - // CHECK: load <2 x i64>, ptr %{{.*}}, align 1{{$}} + // CHECK: load <2 x i64>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm_loadu_epi32(__P); } @@ -6932,7 +6932,7 @@ __m256i test_mm256_loadu_epi32(void const *__P) { // CHECK-LABEL: @test_mm256_loadu_epi32 - // CHECK: load <4 x i64>, ptr %{{.*}}, align 1{{$}} + // CHECK: load <4 x i64>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm256_loadu_epi32(__P); } @@ -9737,3 +9737,5 @@ // CHECK: icmp sgt <4 x i64> %{{.*}}, %{{.*}} return (__mmask8)_mm256_cmpgt_epi64_mask(__a, __b); } + +// CHECK: [[NOUNDEF]] = !{} diff --git a/clang/test/CodeGen/X86/avx512vlbw-builtins.c b/clang/test/CodeGen/X86/avx512vlbw-builtins.c --- a/clang/test/CodeGen/X86/avx512vlbw-builtins.c +++ b/clang/test/CodeGen/X86/avx512vlbw-builtins.c @@ -2498,7 +2498,7 @@ __m128i test_mm_loadu_epi16(void const *__P) { // CHECK-LABEL: @test_mm_loadu_epi16 - // CHECK: load <2 x i64>, ptr %{{.*}}, align 1{{$}} + // CHECK: load <2 x i64>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF:![0-9]+]]{{$}} return _mm_loadu_epi16(__P); } @@ -2516,7 +2516,7 @@ __m256i test_mm256_loadu_epi16(void const *__P) { // CHECK-LABEL: @test_mm256_loadu_epi16 - // CHECK: load <4 x i64>, ptr %{{.*}}, align 1{{$}} + // CHECK: load <4 x i64>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm256_loadu_epi16(__P); } @@ -2534,7 +2534,7 @@ __m128i test_mm_loadu_epi8(void const *__P) { // CHECK-LABEL: @test_mm_loadu_epi8 - // CHECK: load <2 x i64>, ptr %{{.*}}, align 1{{$}} + // CHECK: load <2 x i64>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm_loadu_epi8(__P); } @@ -2552,7 +2552,7 @@ __m256i test_mm256_loadu_epi8(void const *__P) { // CHECK-LABEL: @test_mm256_loadu_epi8 - // CHECK: load <4 x i64>, ptr %{{.*}}, align 1{{$}} + // CHECK: load <4 x i64>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm256_loadu_epi8(__P); } @@ -3236,3 +3236,5 @@ // CHECK: @llvm.x86.avx512.mask.pmovs.wb.mem.256 _mm256_mask_cvtsepi16_storeu_epi8 ( __P, __M, __A); } + +// CHECK: [[NOUNDEF]] = !{} diff --git a/clang/test/CodeGen/X86/sse-builtins.c b/clang/test/CodeGen/X86/sse-builtins.c --- 
a/clang/test/CodeGen/X86/sse-builtins.c +++ b/clang/test/CodeGen/X86/sse-builtins.c @@ -384,7 +384,7 @@ __m128 test_mm_load_ss(float* y) { // CHECK-LABEL: test_mm_load_ss - // CHECK: load float, ptr {{.*}}, align 1{{$}} + // CHECK: load float, ptr {{.*}}, align 1, !noundef [[NOUNDEF:![0-9]+]]{{$}} // CHECK: insertelement <4 x float> undef, float %{{.*}}, i32 0 // CHECK: insertelement <4 x float> %{{.*}}, float 0.000000e+00, i32 1 // CHECK: insertelement <4 x float> %{{.*}}, float 0.000000e+00, i32 2 @@ -404,7 +404,7 @@ __m128 test_mm_loadh_pi(__m128 x, __m64* y) { // CHECK-LABEL: test_mm_loadh_pi - // CHECK: load <2 x float>, ptr {{.*}}, align 1{{$}} + // CHECK: load <2 x float>, ptr {{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} // CHECK: shufflevector {{.*}} <4 x i32> return _mm_loadh_pi(x,y); @@ -412,7 +412,7 @@ __m128 test_mm_loadl_pi(__m128 x, __m64* y) { // CHECK-LABEL: test_mm_loadl_pi - // CHECK: load <2 x float>, ptr {{.*}}, align 1{{$}} + // CHECK: load <2 x float>, ptr {{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} // CHECK: shufflevector {{.*}} <4 x i32> return _mm_loadl_pi(x,y); @@ -427,7 +427,7 @@ __m128 test_mm_loadu_ps(float* A) { // CHECK-LABEL: test_mm_loadu_ps - // CHECK: load <4 x float>, ptr %{{.*}}, align 1{{$}} + // CHECK: load <4 x float>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm_loadu_ps(A); } @@ -807,3 +807,5 @@ // CHECK: xor <4 x i32> return _mm_xor_ps(A, B); } + +// CHECK: [[NOUNDEF]] = !{} diff --git a/clang/test/CodeGen/X86/sse2-builtins.c b/clang/test/CodeGen/X86/sse2-builtins.c --- a/clang/test/CodeGen/X86/sse2-builtins.c +++ b/clang/test/CodeGen/X86/sse2-builtins.c @@ -627,7 +627,7 @@ __m128d test_mm_load_sd(double const* A) { // CHECK-LABEL: test_mm_load_sd - // CHECK: load double, ptr %{{.*}}, align 1{{$}} + // CHECK: load double, ptr %{{.*}}, align 1, !noundef [[NOUNDEF:![0-9]+]]{{$}} return _mm_load_sd(A); } @@ -647,14 +647,14 @@ __m128d test_mm_loadh_pd(__m128d x, void* y) { // CHECK-LABEL: test_mm_loadh_pd - // CHECK: load double, ptr %{{.*}}, align 1{{$}} + // CHECK: load double, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 1 return _mm_loadh_pd(x, y); } __m128i test_mm_loadl_epi64(__m128i* y) { // CHECK: test_mm_loadl_epi64 - // CHECK: load i64, ptr {{.*}}, align 1{{$}} + // CHECK: load i64, ptr {{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} // CHECK: insertelement <2 x i64> undef, i64 {{.*}}, i32 0 // CHECK: insertelement <2 x i64> {{.*}}, i64 0, i32 1 return _mm_loadl_epi64(y); @@ -662,7 +662,7 @@ __m128d test_mm_loadl_pd(__m128d x, void* y) { // CHECK-LABEL: test_mm_loadl_pd - // CHECK: load double, ptr %{{.*}}, align 1{{$}} + // CHECK: load double, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} // CHECK: insertelement <2 x double> undef, double %{{.*}}, i32 0 // CHECK: extractelement <2 x double> %{{.*}}, i32 1 // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 1 @@ -678,19 +678,19 @@ __m128d test_mm_loadu_pd(double const* A) { // CHECK-LABEL: test_mm_loadu_pd - // CHECK: load <2 x double>, ptr %{{.*}}, align 1{{$}} + // CHECK: load <2 x double>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm_loadu_pd(A); } __m128i test_mm_loadu_si128(__m128i const* A) { // CHECK-LABEL: test_mm_loadu_si128 - // CHECK: load <2 x i64>, ptr %{{.*}}, align 1{{$}} + // CHECK: load <2 x i64>, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} return _mm_loadu_si128(A); } __m128i test_mm_loadu_si64(void const* A) { // CHECK-LABEL: test_mm_loadu_si64 - // CHECK: load i64, 
ptr %{{.*}}, align 1{{$}} + // CHECK: load i64, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} // CHECK: insertelement <2 x i64> undef, i64 %{{.*}}, i32 0 // CHECK: insertelement <2 x i64> %{{.*}}, i64 0, i32 1 return _mm_loadu_si64(A); @@ -698,7 +698,7 @@ __m128i test_mm_loadu_si32(void const* A) { // CHECK-LABEL: test_mm_loadu_si32 - // CHECK: load i32, ptr %{{.*}}, align 1{{$}} + // CHECK: load i32, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} // CHECK: insertelement <4 x i32> undef, i32 %{{.*}}, i32 0 // CHECK: insertelement <4 x i32> %{{.*}}, i32 0, i32 1 // CHECK: insertelement <4 x i32> %{{.*}}, i32 0, i32 2 @@ -708,7 +708,7 @@ __m128i test_mm_loadu_si16(void const* A) { // CHECK-LABEL: test_mm_loadu_si16 - // CHECK: load i16, ptr %{{.*}}, align 1{{$}} + // CHECK: load i16, ptr %{{.*}}, align 1, !noundef [[NOUNDEF]]{{$}} // CHECK: insertelement <8 x i16> undef, i16 %{{.*}}, i32 0 // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 1 // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 2 @@ -1695,3 +1695,5 @@ // CHECK: xor <2 x i64> %{{.*}}, %{{.*}} return _mm_xor_si128(A, B); } + +// CHECK: [[NOUNDEF]] = !{} diff --git a/clang/test/CodeGen/aarch64-ls64-inline-asm.c b/clang/test/CodeGen/aarch64-ls64-inline-asm.c --- a/clang/test/CodeGen/aarch64-ls64-inline-asm.c +++ b/clang/test/CodeGen/aarch64-ls64-inline-asm.c @@ -16,8 +16,8 @@ // CHECK-LABEL: @store( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP1:%.*]] = load i512, ptr [[INPUT:%.*]], align 8 -// CHECK-NEXT: tail call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 [[TMP1]], ptr [[ADDR:%.*]]) #[[ATTR1]], !srcloc !3 +// CHECK-NEXT: [[TMP0:%.*]] = load i512, ptr [[INPUT:%.*]], align 8 +// CHECK-NEXT: tail call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 [[TMP0]], ptr [[ADDR:%.*]]) #[[ATTR1]], !srcloc !3 // CHECK-NEXT: ret void // void store(const struct foo *input, void *addr) @@ -27,28 +27,28 @@ // CHECK-LABEL: @store2( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[IN:%.*]], align 4, !tbaa [[TBAA4:![0-9]+]] +// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[IN:%.*]], align 4, !tbaa [[TBAA4:![0-9]+]], !noundef [[NOUNDEF8:![0-9]+]] // CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64 // CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 1 -// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4, !tbaa [[TBAA4]] +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4, !tbaa [[TBAA4]], !noundef [[NOUNDEF8]] // CHECK-NEXT: [[CONV2:%.*]] = sext i32 [[TMP1]] to i64 // CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 4 -// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4, !tbaa [[TBAA4]] +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4, !tbaa [[TBAA4]], !noundef [[NOUNDEF8]] // CHECK-NEXT: [[CONV5:%.*]] = sext i32 [[TMP2]] to i64 // CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 16 -// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX7]], align 4, !tbaa [[TBAA4]] +// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX7]], align 4, !tbaa [[TBAA4]], !noundef [[NOUNDEF8]] // CHECK-NEXT: [[CONV8:%.*]] = sext i32 [[TMP3]] to i64 // CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 25 -// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4, !tbaa [[TBAA4]] +// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4, !tbaa [[TBAA4]], !noundef [[NOUNDEF8]] // CHECK-NEXT: [[CONV11:%.*]] = sext i32 [[TMP4]] to i64 
// CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 36 -// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX13]], align 4, !tbaa [[TBAA4]] +// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX13]], align 4, !tbaa [[TBAA4]], !noundef [[NOUNDEF8]] // CHECK-NEXT: [[CONV14:%.*]] = sext i32 [[TMP5]] to i64 // CHECK-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 49 -// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX16]], align 4, !tbaa [[TBAA4]] +// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX16]], align 4, !tbaa [[TBAA4]], !noundef [[NOUNDEF8]] // CHECK-NEXT: [[CONV17:%.*]] = sext i32 [[TMP6]] to i64 // CHECK-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i32, ptr [[IN]], i64 64 -// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX19]], align 4, !tbaa [[TBAA4]] +// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX19]], align 4, !tbaa [[TBAA4]], !noundef [[NOUNDEF8]] // CHECK-NEXT: [[CONV20:%.*]] = sext i32 [[TMP7]] to i64 // CHECK-NEXT: [[S_SROA_10_0_INSERT_EXT:%.*]] = zext i64 [[CONV20]] to i512 // CHECK-NEXT: [[S_SROA_10_0_INSERT_SHIFT:%.*]] = shl nuw i512 [[S_SROA_10_0_INSERT_EXT]], 448 @@ -72,7 +72,7 @@ // CHECK-NEXT: [[S_SROA_0_0_INSERT_EXT:%.*]] = zext i64 [[CONV]] to i512 // CHECK-NEXT: [[S_SROA_0_0_INSERT_MASK:%.*]] = or i512 [[S_SROA_4_0_INSERT_MASK]], [[S_SROA_4_0_INSERT_SHIFT]] // CHECK-NEXT: [[S_SROA_0_0_INSERT_INSERT:%.*]] = or i512 [[S_SROA_0_0_INSERT_MASK]], [[S_SROA_0_0_INSERT_EXT]] -// CHECK-NEXT: tail call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 [[S_SROA_0_0_INSERT_INSERT]], ptr [[ADDR:%.*]]) #[[ATTR1]], !srcloc !8 +// CHECK-NEXT: tail call void asm sideeffect "st64b $0,[$1]", "r,r,~{memory}"(i512 [[S_SROA_0_0_INSERT_INSERT]], ptr [[ADDR:%.*]]) #[[ATTR1]], !srcloc !9 // CHECK-NEXT: ret void // void store2(int *in, void *addr) diff --git a/clang/test/CodeGen/aarch64-ls64.c b/clang/test/CodeGen/aarch64-ls64.c --- a/clang/test/CodeGen/aarch64-ls64.c +++ b/clang/test/CodeGen/aarch64-ls64.c @@ -20,36 +20,36 @@ // CHECK-C-NEXT: entry: // CHECK-C-NEXT: [[__ADDR_ADDR_I:%.*]] = alloca i8*, align 8 // CHECK-C-NEXT: [[TMP:%.*]] = alloca [[STRUCT_DATA512_T:%.*]], align 8 -// CHECK-C-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8 -// CHECK-C-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META2:![0-9]+]]) -// CHECK-C-NEXT: store i8* [[TMP0]], i8** [[__ADDR_ADDR_I]], align 8, !noalias !2 -// CHECK-C-NEXT: [[TMP1:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8, !noalias !2 +// CHECK-C-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8, !noundef [[NOUNDEF2:![0-9]+]] +// CHECK-C-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) +// CHECK-C-NEXT: store i8* [[TMP0]], i8** [[__ADDR_ADDR_I]], align 8, !noalias !3 +// CHECK-C-NEXT: [[TMP1:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8, !noalias !3, !noundef [[NOUNDEF2]] // CHECK-C-NEXT: [[VAL_I:%.*]] = getelementptr inbounds [[STRUCT_DATA512_T]], %struct.data512_t* [[TMP]], i32 0, i32 0 // CHECK-C-NEXT: [[ARRAYDECAY_I:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[VAL_I]], i64 0, i64 0 -// CHECK-C-NEXT: [[TMP2:%.*]] = call { i64, i64, i64, i64, i64, i64, i64, i64 } @llvm.aarch64.ld64b(i8* [[TMP1]]), !noalias !2 +// CHECK-C-NEXT: [[TMP2:%.*]] = call { i64, i64, i64, i64, i64, i64, i64, i64 } @llvm.aarch64.ld64b(i8* [[TMP1]]), !noalias !3 // CHECK-C-NEXT: [[TMP3:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 0 -// CHECK-C-NEXT: store i64 [[TMP3]], i64* 
[[ARRAYDECAY_I]], align 8, !alias.scope !2 +// CHECK-C-NEXT: store i64 [[TMP3]], i64* [[ARRAYDECAY_I]], align 8, !alias.scope !3 // CHECK-C-NEXT: [[TMP4:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 1 // CHECK-C-NEXT: [[TMP5:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 1 -// CHECK-C-NEXT: store i64 [[TMP5]], i64* [[TMP4]], align 8, !alias.scope !2 +// CHECK-C-NEXT: store i64 [[TMP5]], i64* [[TMP4]], align 8, !alias.scope !3 // CHECK-C-NEXT: [[TMP6:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 2 // CHECK-C-NEXT: [[TMP7:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 2 -// CHECK-C-NEXT: store i64 [[TMP7]], i64* [[TMP6]], align 8, !alias.scope !2 +// CHECK-C-NEXT: store i64 [[TMP7]], i64* [[TMP6]], align 8, !alias.scope !3 // CHECK-C-NEXT: [[TMP8:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 3 // CHECK-C-NEXT: [[TMP9:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 3 -// CHECK-C-NEXT: store i64 [[TMP9]], i64* [[TMP8]], align 8, !alias.scope !2 +// CHECK-C-NEXT: store i64 [[TMP9]], i64* [[TMP8]], align 8, !alias.scope !3 // CHECK-C-NEXT: [[TMP10:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 4 // CHECK-C-NEXT: [[TMP11:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 4 -// CHECK-C-NEXT: store i64 [[TMP11]], i64* [[TMP10]], align 8, !alias.scope !2 +// CHECK-C-NEXT: store i64 [[TMP11]], i64* [[TMP10]], align 8, !alias.scope !3 // CHECK-C-NEXT: [[TMP12:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 5 // CHECK-C-NEXT: [[TMP13:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 5 -// CHECK-C-NEXT: store i64 [[TMP13]], i64* [[TMP12]], align 8, !alias.scope !2 +// CHECK-C-NEXT: store i64 [[TMP13]], i64* [[TMP12]], align 8, !alias.scope !3 // CHECK-C-NEXT: [[TMP14:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 6 // CHECK-C-NEXT: [[TMP15:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 6 -// CHECK-C-NEXT: store i64 [[TMP15]], i64* [[TMP14]], align 8, !alias.scope !2 +// CHECK-C-NEXT: store i64 [[TMP15]], i64* [[TMP14]], align 8, !alias.scope !3 // CHECK-C-NEXT: [[TMP16:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 7 // CHECK-C-NEXT: [[TMP17:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 7 -// CHECK-C-NEXT: store i64 [[TMP17]], i64* [[TMP16]], align 8, !alias.scope !2 +// CHECK-C-NEXT: store i64 [[TMP17]], i64* [[TMP16]], align 8, !alias.scope !3 // CHECK-C-NEXT: [[TMP18:%.*]] = bitcast %struct.data512_t* [[TMP]] to i8* // CHECK-C-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 bitcast (%struct.data512_t* @val to i8*), i8* align 8 [[TMP18]], i64 64, i1 false) // CHECK-C-NEXT: ret void @@ -58,36 +58,36 @@ // CHECK-CXX-NEXT: entry: // CHECK-CXX-NEXT: [[__ADDR_ADDR_I:%.*]] = alloca i8*, align 8 // CHECK-CXX-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_DATA512_T:%.*]], align 8 -// CHECK-CXX-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8 -// CHECK-CXX-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META2:![0-9]+]]) -// CHECK-CXX-NEXT: store i8* [[TMP0]], i8** [[__ADDR_ADDR_I]], align 8, !noalias !2 -// CHECK-CXX-NEXT: [[TMP1:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8, !noalias !2 +// CHECK-CXX-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8, !noundef [[NOUNDEF2:![0-9]+]] +// CHECK-CXX-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) +// CHECK-CXX-NEXT: store i8* [[TMP0]], i8** [[__ADDR_ADDR_I]], align 8, !noalias !3 
+// CHECK-CXX-NEXT: [[TMP1:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8, !noalias !3, !noundef [[NOUNDEF2]] // CHECK-CXX-NEXT: [[VAL_I:%.*]] = getelementptr inbounds [[STRUCT_DATA512_T]], %struct.data512_t* [[REF_TMP]], i32 0, i32 0 // CHECK-CXX-NEXT: [[ARRAYDECAY_I:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[VAL_I]], i64 0, i64 0 -// CHECK-CXX-NEXT: [[TMP2:%.*]] = call { i64, i64, i64, i64, i64, i64, i64, i64 } @llvm.aarch64.ld64b(i8* [[TMP1]]), !noalias !2 +// CHECK-CXX-NEXT: [[TMP2:%.*]] = call { i64, i64, i64, i64, i64, i64, i64, i64 } @llvm.aarch64.ld64b(i8* [[TMP1]]), !noalias !3 // CHECK-CXX-NEXT: [[TMP3:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 0 -// CHECK-CXX-NEXT: store i64 [[TMP3]], i64* [[ARRAYDECAY_I]], align 8, !alias.scope !2 +// CHECK-CXX-NEXT: store i64 [[TMP3]], i64* [[ARRAYDECAY_I]], align 8, !alias.scope !3 // CHECK-CXX-NEXT: [[TMP4:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 1 // CHECK-CXX-NEXT: [[TMP5:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 1 -// CHECK-CXX-NEXT: store i64 [[TMP5]], i64* [[TMP4]], align 8, !alias.scope !2 +// CHECK-CXX-NEXT: store i64 [[TMP5]], i64* [[TMP4]], align 8, !alias.scope !3 // CHECK-CXX-NEXT: [[TMP6:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 2 // CHECK-CXX-NEXT: [[TMP7:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 2 -// CHECK-CXX-NEXT: store i64 [[TMP7]], i64* [[TMP6]], align 8, !alias.scope !2 +// CHECK-CXX-NEXT: store i64 [[TMP7]], i64* [[TMP6]], align 8, !alias.scope !3 // CHECK-CXX-NEXT: [[TMP8:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 3 // CHECK-CXX-NEXT: [[TMP9:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 3 -// CHECK-CXX-NEXT: store i64 [[TMP9]], i64* [[TMP8]], align 8, !alias.scope !2 +// CHECK-CXX-NEXT: store i64 [[TMP9]], i64* [[TMP8]], align 8, !alias.scope !3 // CHECK-CXX-NEXT: [[TMP10:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 4 // CHECK-CXX-NEXT: [[TMP11:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 4 -// CHECK-CXX-NEXT: store i64 [[TMP11]], i64* [[TMP10]], align 8, !alias.scope !2 +// CHECK-CXX-NEXT: store i64 [[TMP11]], i64* [[TMP10]], align 8, !alias.scope !3 // CHECK-CXX-NEXT: [[TMP12:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 5 // CHECK-CXX-NEXT: [[TMP13:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 5 -// CHECK-CXX-NEXT: store i64 [[TMP13]], i64* [[TMP12]], align 8, !alias.scope !2 +// CHECK-CXX-NEXT: store i64 [[TMP13]], i64* [[TMP12]], align 8, !alias.scope !3 // CHECK-CXX-NEXT: [[TMP14:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 6 // CHECK-CXX-NEXT: [[TMP15:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 6 -// CHECK-CXX-NEXT: store i64 [[TMP15]], i64* [[TMP14]], align 8, !alias.scope !2 +// CHECK-CXX-NEXT: store i64 [[TMP15]], i64* [[TMP14]], align 8, !alias.scope !3 // CHECK-CXX-NEXT: [[TMP16:%.*]] = getelementptr i64, i64* [[ARRAYDECAY_I]], i32 7 // CHECK-CXX-NEXT: [[TMP17:%.*]] = extractvalue { i64, i64, i64, i64, i64, i64, i64, i64 } [[TMP2]], 7 -// CHECK-CXX-NEXT: store i64 [[TMP17]], i64* [[TMP16]], align 8, !alias.scope !2 +// CHECK-CXX-NEXT: store i64 [[TMP17]], i64* [[TMP16]], align 8, !alias.scope !3 // CHECK-CXX-NEXT: [[TMP18:%.*]] = bitcast %struct.data512_t* [[REF_TMP]] to i8* // CHECK-CXX-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 bitcast (%struct.data512_t* @val to i8*), i8* align 8 [[TMP18]], i64 64, i1 false) // 
CHECK-CXX-NEXT: ret void @@ -101,11 +101,11 @@ // CHECK-C-NEXT: entry: // CHECK-C-NEXT: [[__ADDR_ADDR_I:%.*]] = alloca i8*, align 8 // CHECK-C-NEXT: [[BYVAL_TEMP:%.*]] = alloca [[STRUCT_DATA512_T:%.*]], align 8 -// CHECK-C-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8 +// CHECK-C-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8, !noundef [[NOUNDEF2]] // CHECK-C-NEXT: [[TMP1:%.*]] = bitcast %struct.data512_t* [[BYVAL_TEMP]] to i8* // CHECK-C-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP1]], i8* align 8 bitcast (%struct.data512_t* @val to i8*), i64 64, i1 false) // CHECK-C-NEXT: store i8* [[TMP0]], i8** [[__ADDR_ADDR_I]], align 8 -// CHECK-C-NEXT: [[TMP2:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8 +// CHECK-C-NEXT: [[TMP2:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8, !noundef [[NOUNDEF2]] // CHECK-C-NEXT: [[VAL_I:%.*]] = getelementptr inbounds [[STRUCT_DATA512_T]], %struct.data512_t* [[BYVAL_TEMP]], i32 0, i32 0 // CHECK-C-NEXT: [[ARRAYDECAY_I:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[VAL_I]], i64 0, i64 0 // CHECK-C-NEXT: [[TMP3:%.*]] = load i64, i64* [[ARRAYDECAY_I]], align 8 @@ -130,11 +130,11 @@ // CHECK-CXX-NEXT: entry: // CHECK-CXX-NEXT: [[__ADDR_ADDR_I:%.*]] = alloca i8*, align 8 // CHECK-CXX-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_DATA512_T:%.*]], align 8 -// CHECK-CXX-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8 +// CHECK-CXX-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8, !noundef [[NOUNDEF2]] // CHECK-CXX-NEXT: [[TMP1:%.*]] = bitcast %struct.data512_t* [[AGG_TMP]] to i8* // CHECK-CXX-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP1]], i8* align 8 bitcast (%struct.data512_t* @val to i8*), i64 64, i1 false) // CHECK-CXX-NEXT: store i8* [[TMP0]], i8** [[__ADDR_ADDR_I]], align 8 -// CHECK-CXX-NEXT: [[TMP2:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8 +// CHECK-CXX-NEXT: [[TMP2:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8, !noundef [[NOUNDEF2]] // CHECK-CXX-NEXT: [[VAL_I:%.*]] = getelementptr inbounds [[STRUCT_DATA512_T]], %struct.data512_t* [[AGG_TMP]], i32 0, i32 0 // CHECK-CXX-NEXT: [[ARRAYDECAY_I:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[VAL_I]], i64 0, i64 0 // CHECK-CXX-NEXT: [[TMP3:%.*]] = load i64, i64* [[ARRAYDECAY_I]], align 8 @@ -164,11 +164,11 @@ // CHECK-C-NEXT: entry: // CHECK-C-NEXT: [[__ADDR_ADDR_I:%.*]] = alloca i8*, align 8 // CHECK-C-NEXT: [[BYVAL_TEMP:%.*]] = alloca [[STRUCT_DATA512_T:%.*]], align 8 -// CHECK-C-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8 +// CHECK-C-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8, !noundef [[NOUNDEF2]] // CHECK-C-NEXT: [[TMP1:%.*]] = bitcast %struct.data512_t* [[BYVAL_TEMP]] to i8* // CHECK-C-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP1]], i8* align 8 bitcast (%struct.data512_t* @val to i8*), i64 64, i1 false) // CHECK-C-NEXT: store i8* [[TMP0]], i8** [[__ADDR_ADDR_I]], align 8 -// CHECK-C-NEXT: [[TMP2:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8 +// CHECK-C-NEXT: [[TMP2:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8, !noundef [[NOUNDEF2]] // CHECK-C-NEXT: [[VAL_I:%.*]] = getelementptr inbounds [[STRUCT_DATA512_T]], %struct.data512_t* [[BYVAL_TEMP]], i32 0, i32 0 // CHECK-C-NEXT: [[ARRAYDECAY_I:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[VAL_I]], i64 0, i64 0 // CHECK-C-NEXT: [[TMP3:%.*]] = load i64, i64* [[ARRAYDECAY_I]], align 8 @@ -194,11 +194,11 @@ // CHECK-CXX-NEXT: entry: // CHECK-CXX-NEXT: [[__ADDR_ADDR_I:%.*]] = alloca i8*, align 8 // CHECK-CXX-NEXT: [[AGG_TMP:%.*]] = alloca 
[[STRUCT_DATA512_T:%.*]], align 8 -// CHECK-CXX-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8 +// CHECK-CXX-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8, !noundef [[NOUNDEF2]] // CHECK-CXX-NEXT: [[TMP1:%.*]] = bitcast %struct.data512_t* [[AGG_TMP]] to i8* // CHECK-CXX-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP1]], i8* align 8 bitcast (%struct.data512_t* @val to i8*), i64 64, i1 false) // CHECK-CXX-NEXT: store i8* [[TMP0]], i8** [[__ADDR_ADDR_I]], align 8 -// CHECK-CXX-NEXT: [[TMP2:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8 +// CHECK-CXX-NEXT: [[TMP2:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8, !noundef [[NOUNDEF2]] // CHECK-CXX-NEXT: [[VAL_I:%.*]] = getelementptr inbounds [[STRUCT_DATA512_T]], %struct.data512_t* [[AGG_TMP]], i32 0, i32 0 // CHECK-CXX-NEXT: [[ARRAYDECAY_I:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[VAL_I]], i64 0, i64 0 // CHECK-CXX-NEXT: [[TMP3:%.*]] = load i64, i64* [[ARRAYDECAY_I]], align 8 @@ -229,11 +229,11 @@ // CHECK-C-NEXT: entry: // CHECK-C-NEXT: [[__ADDR_ADDR_I:%.*]] = alloca i8*, align 8 // CHECK-C-NEXT: [[BYVAL_TEMP:%.*]] = alloca [[STRUCT_DATA512_T:%.*]], align 8 -// CHECK-C-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8 +// CHECK-C-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8, !noundef [[NOUNDEF2]] // CHECK-C-NEXT: [[TMP1:%.*]] = bitcast %struct.data512_t* [[BYVAL_TEMP]] to i8* // CHECK-C-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP1]], i8* align 8 bitcast (%struct.data512_t* @val to i8*), i64 64, i1 false) // CHECK-C-NEXT: store i8* [[TMP0]], i8** [[__ADDR_ADDR_I]], align 8 -// CHECK-C-NEXT: [[TMP2:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8 +// CHECK-C-NEXT: [[TMP2:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8, !noundef [[NOUNDEF2]] // CHECK-C-NEXT: [[VAL_I:%.*]] = getelementptr inbounds [[STRUCT_DATA512_T]], %struct.data512_t* [[BYVAL_TEMP]], i32 0, i32 0 // CHECK-C-NEXT: [[ARRAYDECAY_I:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[VAL_I]], i64 0, i64 0 // CHECK-C-NEXT: [[TMP3:%.*]] = load i64, i64* [[ARRAYDECAY_I]], align 8 @@ -259,11 +259,11 @@ // CHECK-CXX-NEXT: entry: // CHECK-CXX-NEXT: [[__ADDR_ADDR_I:%.*]] = alloca i8*, align 8 // CHECK-CXX-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_DATA512_T:%.*]], align 8 -// CHECK-CXX-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8 +// CHECK-CXX-NEXT: [[TMP0:%.*]] = load i8*, i8** @addr, align 8, !noundef [[NOUNDEF2]] // CHECK-CXX-NEXT: [[TMP1:%.*]] = bitcast %struct.data512_t* [[AGG_TMP]] to i8* // CHECK-CXX-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP1]], i8* align 8 bitcast (%struct.data512_t* @val to i8*), i64 64, i1 false) // CHECK-CXX-NEXT: store i8* [[TMP0]], i8** [[__ADDR_ADDR_I]], align 8 -// CHECK-CXX-NEXT: [[TMP2:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8 +// CHECK-CXX-NEXT: [[TMP2:%.*]] = load i8*, i8** [[__ADDR_ADDR_I]], align 8, !noundef [[NOUNDEF2]] // CHECK-CXX-NEXT: [[VAL_I:%.*]] = getelementptr inbounds [[STRUCT_DATA512_T]], %struct.data512_t* [[AGG_TMP]], i32 0, i32 0 // CHECK-CXX-NEXT: [[ARRAYDECAY_I:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[VAL_I]], i64 0, i64 0 // CHECK-CXX-NEXT: [[TMP3:%.*]] = load i64, i64* [[ARRAYDECAY_I]], align 8 diff --git a/clang/test/CodeGen/memcpy-inline-builtin.c b/clang/test/CodeGen/memcpy-inline-builtin.c --- a/clang/test/CodeGen/memcpy-inline-builtin.c +++ b/clang/test/CodeGen/memcpy-inline-builtin.c @@ -26,16 +26,16 @@ // CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8 // CHECK-NEXT: store ptr [[B:%.*]], ptr [[B_ADDR]], 
align 8 // CHECK-NEXT: store i64 [[C:%.*]], ptr [[C_ADDR]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[C_ADDR]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF2:![0-9]+]] +// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK-NEXT: store ptr [[TMP0]], ptr [[A_ADDR_I]], align 8 // CHECK-NEXT: store ptr [[TMP1]], ptr [[B_ADDR_I]], align 8 // CHECK-NEXT: store i64 [[TMP2]], ptr [[C_ADDR_I]], align 8 -// CHECK-NEXT: call void asm sideeffect "# memcpy.inline marker", "~{dirflag},~{fpsr},~{flags}"() #[[ATTR3:[0-9]+]], !srcloc !2 -// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR_I]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR_I]], align 8 -// CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[C_ADDR_I]], align 8 +// CHECK-NEXT: call void asm sideeffect "# memcpy.inline marker", "~{dirflag},~{fpsr},~{flags}"() #[[ATTR3:[0-9]+]], !srcloc !3 +// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR_I]], align 8, !noundef [[NOUNDEF2]] +// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR_I]], align 8, !noundef [[NOUNDEF2]] +// CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[C_ADDR_I]], align 8, !noundef [[NOUNDEF2]] // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 1 [[TMP3]], ptr align 1 [[TMP4]], i64 [[TMP5]], i1 false) // CHECK-NEXT: ret ptr [[TMP3]] // @@ -52,15 +52,15 @@ // CHECK-NEXT: store ptr [[A:%.*]], ptr [[A_ADDR]], align 8 // CHECK-NEXT: store ptr [[B:%.*]], ptr [[B_ADDR]], align 8 // CHECK-NEXT: store i64 [[C:%.*]], ptr [[C_ADDR]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[C_ADDR]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], 10 // CHECK-NEXT: [[TMP1:%.*]] = zext i1 [[CMP]] to i64 // CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], ptr @memcpy, ptr @foo // CHECK-NEXT: store ptr [[COND]], ptr [[CPY]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[CPY]], align 8 -// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[C_ADDR]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[CPY]], align 8, !noundef [[NOUNDEF2]] +// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK-NEXT: [[CALL:%.*]] = call ptr [[TMP2]](ptr noundef [[TMP3]], ptr noundef [[TMP4]], i64 noundef [[TMP5]]) // CHECK-NEXT: ret void // diff --git a/clang/test/CodeGen/tbaa-array.cpp b/clang/test/CodeGen/tbaa-array.cpp --- a/clang/test/CodeGen/tbaa-array.cpp +++ b/clang/test/CodeGen/tbaa-array.cpp @@ -13,34 +13,35 @@ int foo(B *b) { // CHECK-LABEL: _Z3fooP1B -// CHECK: load i32, {{.*}}, !tbaa [[TAG_A_i:!.*]] +// CHECK: load i32, {{.*}}, !tbaa [[TAG_A_i:![0-9]+]], !noundef [[NOUNDEF:![0-9]+]] // CHECK-NEW-LABEL: _Z3fooP1B -// CHECK-NEW: load i32, {{.*}}, !tbaa [[TAG_A_i:!.*]] +// CHECK-NEW: load i32, {{.*}}, !tbaa [[TAG_A_i:![0-9]+]], !noundef [[NOUNDEF:![0-9]+]] return b->a->i; } // Check that members of array types are represented correctly. 
int bar(C *c) { // CHECK-NEW-LABEL: _Z3barP1C -// CHECK-NEW: load i32, {{.*}}, !tbaa [[TAG_C_i:!.*]] +// CHECK-NEW: load i32, {{.*}}, !tbaa [[TAG_C_i:![0-9]+]], !noundef [[NOUNDEF:![0-9]+]] return c->i; } int bar2(C *c) { // CHECK-NEW-LABEL: _Z4bar2P1C -// CHECK-NEW: load i32, {{.*}}, !tbaa [[TAG_int:!.*]] +// CHECK-NEW: load i32, {{.*}}, !tbaa [[TAG_int:![0-9]+]], !noundef [[NOUNDEF:![0-9]+]] return c->x[2]; } int bar3(C *c, int j) { // CHECK-NEW-LABEL: _Z4bar3P1Ci -// CHECK-NEW: load i32, {{.*}}, !tbaa [[TAG_int:!.*]] +// CHECK-NEW: load i32, {{.*}}, !tbaa [[TAG_int:![0-9]+]], !noundef [[NOUNDEF:![0-9]+]] return c->x[j]; } // CHECK-DAG: [[TAG_A_i]] = !{[[TYPE_A:!.*]], [[TYPE_int:!.*]], i64 0} // CHECK-DAG: [[TYPE_A]] = !{!"_ZTS1A", !{{.*}}, i64 0} // CHECK-DAG: [[TYPE_int]] = !{!"int", !{{.*}}, i64 0} +// CHECK-DAG: [[NOUNDEF]] = !{} // CHECK-NEW-DAG: [[TYPE_char:!.*]] = !{{{.*}}, i64 1, !"omnipotent char"} // CHECK-NEW-DAG: [[TYPE_int:!.*]] = !{[[TYPE_char]], i64 4, !"int"} @@ -50,3 +51,4 @@ // CHECK-NEW-DAG: [[TAG_A_i]] = !{[[TYPE_A]], [[TYPE_int]], i64 0, i64 4} // CHECK-NEW-DAG: [[TYPE_C:!.*]] = !{[[TYPE_char]], i64 16, !"_ZTS1C", [[TYPE_int]], i64 0, i64 4, [[TYPE_int]], i64 4, i64 12} // CHECK-NEW-DAG: [[TAG_C_i]] = !{[[TYPE_C:!.*]], [[TYPE_int:!.*]], i64 0, i64 4} +// CHECK-NEW-DAG: [[NOUNDEF]] = !{} diff --git a/clang/test/CodeGen/tbaa-base.cpp b/clang/test/CodeGen/tbaa-base.cpp --- a/clang/test/CodeGen/tbaa-base.cpp +++ b/clang/test/CodeGen/tbaa-base.cpp @@ -19,7 +19,7 @@ int f1(B *b) { // CHECK-LABEL: _Z2f1P1B -// CHECK: load i32, {{.*}}, !tbaa [[TAG_A_y:!.*]] +// CHECK: load i32, {{.*}}, !tbaa [[TAG_A_y:![0-9]+]], !noundef [[NOUNDEF:![0-9]+]] return b->y; } @@ -52,7 +52,9 @@ // OLD-PATH-DAG: [[TYPE_int:!.*]] = !{!"int", [[TYPE_char]], i64 0} // OLD-PATH-DAG: [[TYPE_A:!.*]] = !{!"_ZTS1A", [[TYPE_int]], i64 0, [[TYPE_int]], i64 4, [[TYPE_int]], i64 8} // OLD-PATH-DAG: [[TAG_A_y]] = !{[[TYPE_A]], [[TYPE_int]], i64 4} +// OLD-PATH-DAG: [[NOUNDEF]] = !{} // NEW-PATH-DAG: [[TYPE_char:!.*]] = !{{{.*}}, i64 1, !"omnipotent char"} // NEW-PATH-DAG: [[TYPE_int:!.*]] = !{[[TYPE_char]], i64 4, !"int"} // NEW-PATH-DAG: [[TYPE_A:!.*]] = !{[[TYPE_char]], i64 12, !"_ZTS1A", [[TYPE_int]], i64 0, i64 4, [[TYPE_int]], i64 4, i64 4, [[TYPE_int]], i64 8, i64 4} // NEW-PATH-DAG: [[TAG_A_y]] = !{[[TYPE_A]], [[TYPE_int]], i64 4, i64 4} +// NEW-PATH-DAG: [[NOUNDEF]] = !{} diff --git a/clang/test/CodeGen/tbaa.cpp b/clang/test/CodeGen/tbaa.cpp --- a/clang/test/CodeGen/tbaa.cpp +++ b/clang/test/CodeGen/tbaa.cpp @@ -208,9 +208,9 @@ char g13(struct five *a, struct five *b) { return a->b; // CHECK-LABEL: define{{.*}} signext i8 @_Z3g13 -// CHECK: load i8, ptr %{{.*}}, align 1, !tbaa [[TAG_char:!.*]] +// CHECK: load i8, ptr %{{.*}}, align 1, !tbaa [[TAG_char:![0-9]+]], !noundef [[TAG_NOUNDEF:![0-9]+]] // PATH-LABEL: define{{.*}} signext i8 @_Z3g13 -// PATH: load i8, ptr %{{.*}}, align 1, !tbaa [[TAG_five_b:!.*]] +// PATH: load i8, ptr %{{.*}}, align 1, !tbaa [[TAG_five_b:![0-9]+]], !noundef [[TAG_NOUNDEF:![0-9]+]] } struct six { @@ -223,7 +223,7 @@ // CHECK-LABEL: define{{.*}} signext i8 @_Z3g14 // CHECK: load i8, ptr %{{.*}}, align 1, !tbaa [[TAG_char]] // PATH-LABEL: define{{.*}} signext i8 @_Z3g14 -// PATH: load i8, ptr %{{.*}}, align 1, !tbaa [[TAG_six_b:!.*]] +// PATH: load i8, ptr %{{.*}}, align 1, !tbaa [[TAG_six_b:![0-9]+]], !noundef [[TAG_NOUNDEF:![0-9]+]] return a->b; } diff --git a/clang/test/CodeGen/ubsan-pass-object-size.c b/clang/test/CodeGen/ubsan-pass-object-size.c --- 
a/clang/test/CodeGen/ubsan-pass-object-size.c +++ b/clang/test/CodeGen/ubsan-pass-object-size.c @@ -6,7 +6,7 @@ // CHECK: [[SIZE_ALLOCA:%.*]] = alloca i64, align 8 // CHECK: store i64 %{{.*}}, ptr [[SIZE_ALLOCA]], align 8 - // CHECK: [[LOAD_SIZE:%.*]] = load i64, ptr [[SIZE_ALLOCA]], align 8, !nosanitize + // CHECK: [[LOAD_SIZE:%.*]] = load i64, ptr [[SIZE_ALLOCA]], align 8, !noundef ![[NOUNDEF:[0-9]+]], !nosanitize // CHECK-NEXT: [[SCALED_SIZE:%.*]] = udiv i64 [[LOAD_SIZE]], 4, !nosanitize // CHECK-NEXT: [[SEXT_N:%.*]] = sext i32 %{{.*}} to i64, !nosanitize // CHECK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[SEXT_N]], [[SCALED_SIZE]], !nosanitize @@ -66,3 +66,5 @@ // CHECK: ret i32 return p[n]; } + +// CHECK: [[NOUNDEF]] = !{} diff --git a/clang/test/CodeGenCXX/attr-likelihood-if-branch-weights.cpp b/clang/test/CodeGenCXX/attr-likelihood-if-branch-weights.cpp --- a/clang/test/CodeGenCXX/attr-likelihood-if-branch-weights.cpp +++ b/clang/test/CodeGenCXX/attr-likelihood-if-branch-weights.cpp @@ -8,7 +8,7 @@ bool f() { // CHECK-LABEL: define{{.*}} zeroext i1 @_Z1fv - // CHECK: br {{.*}} !prof !7 + // CHECK: br {{.*}} !prof [[PROF7:![0-9]+]] if (b) [[likely]] { return A(); @@ -18,7 +18,7 @@ bool g() { // CHECK-LABEL: define{{.*}} zeroext i1 @_Z1gv - // CHECK: br {{.*}} !prof !8 + // CHECK: br {{.*}} !prof [[PROF8:![0-9]+]] if (b) [[unlikely]] { return A(); @@ -29,7 +29,7 @@ bool h() { // CHECK-LABEL: define{{.*}} zeroext i1 @_Z1hv - // CHECK: br {{.*}} !prof !8 + // CHECK: br {{.*}} !prof [[PROF8]] if (b) [[unlikely]] return A(); @@ -38,7 +38,7 @@ void NullStmt() { // CHECK-LABEL: define{{.*}}NullStmt - // CHECK: br {{.*}} !prof !8 + // CHECK: br {{.*}} !prof [[PROF8]] if (b) [[unlikely]]; else { @@ -49,7 +49,7 @@ void IfStmt() { // CHECK-LABEL: define{{.*}}IfStmt - // CHECK: br {{.*}} !prof !8 + // CHECK: br {{.*}} !prof [[PROF8]] if (b) [[unlikely]] if (B()) {} @@ -63,20 +63,20 @@ void WhileStmt() { // CHECK-LABEL: define{{.*}}WhileStmt - // CHECK: br {{.*}} !prof !8 + // CHECK: br {{.*}} !prof [[PROF8]] if (b) [[unlikely]] while (B()) {} // CHECK-NOT: br {{.*}} %if.end{{.*}} !prof if (b) - // CHECK: br {{.*}} !prof !7 + // CHECK: br {{.*}} !prof [[PROF7]] while (B()) [[unlikely]] { b = false; } } void DoStmt() { // CHECK-LABEL: define{{.*}}DoStmt - // CHECK: br {{.*}} !prof !8 + // CHECK: br {{.*}} !prof [[PROF8]] if (b) [[unlikely]] do {} while (B()) @@ -91,20 +91,20 @@ void ForStmt() { // CHECK-LABEL: define{{.*}}ForStmt - // CHECK: br {{.*}} !prof !8 + // CHECK: br {{.*}} !prof [[PROF8]] if (b) [[unlikely]] for (; B();) {} // CHECK-NOT: br {{.*}} %if.end{{.*}} !prof if (b) - // CHECK: br {{.*}} !prof !7 + // CHECK: br {{.*}} !prof [[PROF7]] for (; B();) [[unlikely]] {} } void GotoStmt() { // CHECK-LABEL: define{{.*}}GotoStmt - // CHECK: br {{.*}} !prof !8 + // CHECK: br {{.*}} !prof [[PROF8]] if (b) [[unlikely]] goto end; else { @@ -116,7 +116,7 @@ void ReturnStmt() { // CHECK-LABEL: define{{.*}}ReturnStmt - // CHECK: br {{.*}} !prof !8 + // CHECK: br {{.*}} !prof [[PROF8]] if (b) [[unlikely]] return; else { @@ -127,7 +127,7 @@ void SwitchStmt() { // CHECK-LABEL: define{{.*}}SwitchStmt - // CHECK: br {{.*}} !prof !8 + // CHECK: br {{.*}} !prof [[PROF8]] if (b) [[unlikely]] switch (i) {} else { @@ -144,5 +144,5 @@ } } -// CHECK: !7 = !{!"branch_weights", i32 [[UNLIKELY]], i32 [[LIKELY]]} -// CHECK: !8 = !{!"branch_weights", i32 [[LIKELY]], i32 [[UNLIKELY]]} +// CHECK: [[PROF7]] = !{!"branch_weights", i32 [[UNLIKELY]], i32 [[LIKELY]]} +// CHECK: [[PROF8]] = !{!"branch_weights", i32 [[LIKELY]], 
i32 [[UNLIKELY]]} diff --git a/clang/test/CodeGenCXX/attr-likelihood-switch-branch-weights.cpp b/clang/test/CodeGenCXX/attr-likelihood-switch-branch-weights.cpp --- a/clang/test/CodeGenCXX/attr-likelihood-switch-branch-weights.cpp +++ b/clang/test/CodeGenCXX/attr-likelihood-switch-branch-weights.cpp @@ -5,10 +5,10 @@ // CHECK-LABEL: @_Z8OneCaseLv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2:![0-9]+]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2:![0-9]+]], !noundef [[NOUNDEF6:![0-9]+]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_EPILOG:%.*]] [ // CHECK-NEXT: i32 1, label [[SW_EPILOG]] -// CHECK-NEXT: ], !prof !6 +// CHECK-NEXT: ], !prof [[PROF7:![0-9]+]] // CHECK: sw.epilog: // CHECK-NEXT: ret void // @@ -20,12 +20,12 @@ // CHECK-LABEL: @_Z8OneCaseUv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_EPILOG:%.*]] [ // CHECK-NEXT: i32 1, label [[SW_BB:%.*]] -// CHECK-NEXT: ], !prof !7 +// CHECK-NEXT: ], !prof [[PROF8:![0-9]+]] // CHECK: sw.bb: -// CHECK-NEXT: [[TMP1:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP1:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK-NEXT: store volatile i32 [[INC]], ptr @i, align 4, !tbaa [[TBAA2]] // CHECK-NEXT: br label [[SW_EPILOG]] @@ -40,11 +40,11 @@ // CHECK-LABEL: @_Z10TwoCasesLNv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_EPILOG:%.*]] [ // CHECK-NEXT: i32 1, label [[SW_EPILOG]] // CHECK-NEXT: i32 2, label [[SW_EPILOG]] -// CHECK-NEXT: ], !prof !8 +// CHECK-NEXT: ], !prof [[PROF9:![0-9]+]] // CHECK: sw.epilog: // CHECK-NEXT: ret void // @@ -57,11 +57,11 @@ // CHECK-LABEL: @_Z10TwoCasesUNv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_EPILOG:%.*]] [ // CHECK-NEXT: i32 1, label [[SW_EPILOG]] // CHECK-NEXT: i32 2, label [[SW_EPILOG]] -// CHECK-NEXT: ], !prof !9 +// CHECK-NEXT: ], !prof [[PROF10:![0-9]+]] // CHECK: sw.epilog: // CHECK-NEXT: ret void // @@ -74,11 +74,11 @@ // CHECK-LABEL: @_Z10TwoCasesLUv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_EPILOG:%.*]] [ // CHECK-NEXT: i32 1, label [[SW_EPILOG]] // CHECK-NEXT: i32 2, label [[SW_EPILOG]] -// CHECK-NEXT: ], !prof !10 +// CHECK-NEXT: ], !prof [[PROF11:![0-9]+]] // CHECK: sw.epilog: // CHECK-NEXT: ret void // @@ -91,13 +91,13 @@ // CHECK-LABEL: @_Z20CasesFallthroughNNLNv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_EPILOG:%.*]] [ // 
CHECK-NEXT: i32 1, label [[SW_BB:%.*]] // CHECK-NEXT: i32 2, label [[SW_BB]] // CHECK-NEXT: i32 3, label [[SW_BB1:%.*]] // CHECK-NEXT: i32 4, label [[SW_BB1]] -// CHECK-NEXT: ], !prof !11 +// CHECK-NEXT: ], !prof [[PROF12:![0-9]+]] // CHECK: sw.bb: // CHECK-NEXT: br label [[SW_BB1]] // CHECK: sw.bb1: @@ -116,13 +116,13 @@ // CHECK-LABEL: @_Z20CasesFallthroughNNUNv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_EPILOG:%.*]] [ // CHECK-NEXT: i32 1, label [[SW_BB:%.*]] // CHECK-NEXT: i32 2, label [[SW_BB]] // CHECK-NEXT: i32 3, label [[SW_BB1:%.*]] // CHECK-NEXT: i32 4, label [[SW_BB1]] -// CHECK-NEXT: ], !prof !12 +// CHECK-NEXT: ], !prof [[PROF13:![0-9]+]] // CHECK: sw.bb: // CHECK-NEXT: br label [[SW_BB1]] // CHECK: sw.bb1: @@ -141,7 +141,7 @@ // CHECK-LABEL: @_Z28CasesFallthroughRangeSmallLNv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_EPILOG:%.*]] [ // CHECK-NEXT: i32 1, label [[SW_BB:%.*]] // CHECK-NEXT: i32 2, label [[SW_BB]] @@ -151,9 +151,9 @@ // CHECK-NEXT: i32 102, label [[SW_BB1:%.*]] // CHECK-NEXT: i32 103, label [[SW_BB2:%.*]] // CHECK-NEXT: i32 104, label [[SW_BB2]] -// CHECK-NEXT: ], !prof !13 +// CHECK-NEXT: ], !prof [[PROF14:![0-9]+]] // CHECK: sw.bb: -// CHECK-NEXT: [[TMP1:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP1:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK-NEXT: store volatile i32 [[INC]], ptr @i, align 4, !tbaa [[TBAA2]] // CHECK-NEXT: br label [[SW_BB1]] @@ -175,7 +175,7 @@ // CHECK-LABEL: @_Z28CasesFallthroughRangeSmallUNv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_EPILOG:%.*]] [ // CHECK-NEXT: i32 1, label [[SW_BB:%.*]] // CHECK-NEXT: i32 2, label [[SW_BB]] @@ -185,9 +185,9 @@ // CHECK-NEXT: i32 102, label [[SW_BB1:%.*]] // CHECK-NEXT: i32 103, label [[SW_BB2:%.*]] // CHECK-NEXT: i32 104, label [[SW_BB2]] -// CHECK-NEXT: ], !prof !14 +// CHECK-NEXT: ], !prof [[PROF15:![0-9]+]] // CHECK: sw.bb: -// CHECK-NEXT: [[TMP1:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP1:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK-NEXT: store volatile i32 [[INC]], ptr @i, align 4, !tbaa [[TBAA2]] // CHECK-NEXT: br label [[SW_BB1]] @@ -209,11 +209,11 @@ // CHECK-LABEL: @_Z29CasesFallthroughRangeLargeLLNv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_CASERANGE:%.*]] [ // CHECK-NEXT: i32 1003, label [[SW_BB1:%.*]] // CHECK-NEXT: i32 104, label [[SW_BB1]] -// CHECK-NEXT: ], !prof !8 +// CHECK-NEXT: ], !prof [[PROF9]] // CHECK: sw.bb: // CHECK-NEXT: br label [[SW_BB1]] // CHECK: sw.bb1: 
@@ -236,11 +236,11 @@ // CHECK-LABEL: @_Z29CasesFallthroughRangeLargeUUNv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_CASERANGE:%.*]] [ // CHECK-NEXT: i32 1003, label [[SW_BB1:%.*]] // CHECK-NEXT: i32 104, label [[SW_BB1]] -// CHECK-NEXT: ], !prof !9 +// CHECK-NEXT: ], !prof [[PROF10]] // CHECK: sw.bb: // CHECK-NEXT: br label [[SW_BB1]] // CHECK: sw.bb1: @@ -263,10 +263,10 @@ // CHECK-LABEL: @_Z15OneCaseDefaultLv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_DEFAULT:%.*]] [ // CHECK-NEXT: i32 1, label [[SW_EPILOG:%.*]] -// CHECK-NEXT: ], !prof !15 +// CHECK-NEXT: ], !prof [[PROF16:![0-9]+]] // CHECK: sw.default: // CHECK-NEXT: br label [[SW_EPILOG]] // CHECK: sw.epilog: @@ -281,10 +281,10 @@ // CHECK-LABEL: @_Z15OneCaseDefaultUv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_DEFAULT:%.*]] [ // CHECK-NEXT: i32 1, label [[SW_EPILOG:%.*]] -// CHECK-NEXT: ], !prof !16 +// CHECK-NEXT: ], !prof [[PROF17:![0-9]+]] // CHECK: sw.default: // CHECK-NEXT: br label [[SW_EPILOG]] // CHECK: sw.epilog: @@ -299,11 +299,11 @@ // CHECK-LABEL: @_Z18TwoCasesDefaultLNLv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_DEFAULT:%.*]] [ // CHECK-NEXT: i32 1, label [[SW_EPILOG:%.*]] // CHECK-NEXT: i32 2, label [[SW_EPILOG]] -// CHECK-NEXT: ], !prof !17 +// CHECK-NEXT: ], !prof [[PROF18:![0-9]+]] // CHECK: sw.default: // CHECK-NEXT: br label [[SW_EPILOG]] // CHECK: sw.epilog: @@ -319,11 +319,11 @@ // CHECK-LABEL: @_Z18TwoCasesDefaultLNNv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_DEFAULT:%.*]] [ // CHECK-NEXT: i32 1, label [[SW_EPILOG:%.*]] // CHECK-NEXT: i32 2, label [[SW_EPILOG]] -// CHECK-NEXT: ], !prof !8 +// CHECK-NEXT: ], !prof [[PROF9]] // CHECK: sw.default: // CHECK-NEXT: br label [[SW_EPILOG]] // CHECK: sw.epilog: @@ -339,11 +339,11 @@ // CHECK-LABEL: @_Z18TwoCasesDefaultLNUv( // CHECK-NEXT: entry: -// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]] +// CHECK-NEXT: [[TMP0:%.*]] = load volatile i32, ptr @i, align 4, !tbaa [[TBAA2]], !noundef [[NOUNDEF6]] // CHECK-NEXT: switch i32 [[TMP0]], label [[SW_DEFAULT:%.*]] [ // CHECK-NEXT: i32 1, label [[SW_EPILOG:%.*]] // CHECK-NEXT: i32 2, label [[SW_EPILOG]] -// CHECK-NEXT: ], !prof !18 +// CHECK-NEXT: ], !prof [[PROF19:![0-9]+]] // CHECK: sw.default: // CHECK-NEXT: br label [[SW_EPILOG]] // CHECK: sw.epilog: diff --git a/clang/test/CodeGenCXX/builtin-bit-cast-no-tbaa.cpp b/clang/test/CodeGenCXX/builtin-bit-cast-no-tbaa.cpp --- a/clang/test/CodeGenCXX/builtin-bit-cast-no-tbaa.cpp +++ 
b/clang/test/CodeGenCXX/builtin-bit-cast-no-tbaa.cpp @@ -4,7 +4,7 @@ // CHECK-LABEL: define{{.*}} void @_Z11test_scalarv __builtin_bit_cast(float, 42); - // CHECK: load float, ptr {{.*}}, align 4, !tbaa ![[MAY_ALIAS_TBAA:.*]] + // CHECK: load float, ptr {{.*}}, align 4, !tbaa ![[MAY_ALIAS_TBAA:.*]], !noundef [[NOUNDEF:![0-9]+]] } void test_scalar2() { @@ -12,13 +12,14 @@ struct S {int m;}; __builtin_bit_cast(int, S{42}); - // CHECK: load i32, ptr {{.*}}, align 4, !tbaa ![[MAY_ALIAS_TBAA]] + // CHECK: load i32, ptr {{.*}}, align 4, !tbaa ![[MAY_ALIAS_TBAA]], !noundef [[NOUNDEF]] } int test_same_type(int &r) { - // CHECK: load i32, ptr {{.*}}, align 4, !tbaa ![[MAY_ALIAS_TBAA]] + // CHECK: load i32, ptr {{.*}}, align 4, !tbaa ![[MAY_ALIAS_TBAA]], !noundef [[NOUNDEF]] return __builtin_bit_cast(int, r); } // CHECK: ![[CHAR_TBAA:.*]] = !{!"omnipotent char", {{.*}}, i64 0} // CHECK: ![[MAY_ALIAS_TBAA]] = !{![[CHAR_TBAA]], ![[CHAR_TBAA]], i64 0} +// CHECK: [[NOUNDEF]] = !{} diff --git a/clang/test/CodeGenCXX/debug-info-line.cpp b/clang/test/CodeGenCXX/debug-info-line.cpp --- a/clang/test/CodeGenCXX/debug-info-line.cpp +++ b/clang/test/CodeGenCXX/debug-info-line.cpp @@ -86,7 +86,7 @@ int *src1(); int src2(); #line 800 - int x = ( // CHECK: load {{.*}} !dbg [[DBG_F7:!.*]] + int x = ( // CHECK: load {{.*}} !dbg [[DBG_F7:!.*]], !noundef [[NOUNDEF:![0-9]+]] src1())[src2()]; } @@ -95,7 +95,7 @@ int src1[1]; int src2(); #line 900 - int x = ( // CHECK: load {{.*}} !dbg [[DBG_F8:!.*]] + int x = ( // CHECK: load {{.*}} !dbg [[DBG_F8:!.*]], !noundef [[NOUNDEF]] src1)[src2()]; } @@ -300,6 +300,7 @@ #line 2700 f25_a(); } +// CHECK: [[NOUNDEF]] = !{} // CHECK: [[DBG_F1]] = !DILocation(line: 100, // CHECK: [[DBG_FOO_VALUE]] = !DILocation(line: 200, // CHECK: [[DBG_FOO_REF]] = !DILocation(line: 202, diff --git a/clang/test/CodeGenCXX/pr12251.cpp b/clang/test/CodeGenCXX/pr12251.cpp --- a/clang/test/CodeGenCXX/pr12251.cpp +++ b/clang/test/CodeGenCXX/pr12251.cpp @@ -5,7 +5,7 @@ return *x; } // CHECK-LABEL: define{{.*}} zeroext i1 @_Z1fPb -// CHECK: load i8, ptr %{{[^ ]*}}, align 1, !range [[RANGE_i8_0_2:![^ ]*]] +// CHECK: load i8, ptr %{{[^ ]*}}, align 1, !range [[RANGE_i8_0_2:![0-9]+]], !noundef [[NOUNDEF:![0-9]+]] // Only enum-tests follow. Ensure that after the bool test, no further range // metadata shows up when strict enums are disabled. 
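The bool and enum functions in pr12251.cpp are one-line dereferences of their pointer argument; a minimal sketch of the bool case and the IR shape the updated CHECK line expects follows (approximate, inferred from the mangled name _Z1fPb and the CHECK pattern above, with placeholder metadata ids — not the literal test source):

bool f(bool *x) {
  return *x; // this dereference is the load matched by the CHECK line above
}

// Approximate IR once the patch is applied: the load keeps its value-range
// metadata and additionally carries an empty !noundef node.
//
//   define zeroext i1 @_Z1fPb(ptr %x) {
//     %0 = load i8, ptr %x, align 1, !range !5, !noundef !6
//     ...
//   }
//   !5 = !{i8 0, i8 2}   ; a bool load may only produce 0 or 1
//   !6 = !{}             ; asserts the loaded value is neither undef nor poison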
@@ -32,42 +32,42 @@ return *x; } // CHECK-LABEL: define{{.*}} i32 @_Z2g3P2e3 -// CHECK: load i32, ptr %x, align 4, !range [[RANGE_i32_0_32:![^ ]*]] +// CHECK: load i32, ptr %x, align 4, !range [[RANGE_i32_0_32:![0-9]+]], !noundef [[NOUNDEF]] enum e4 { e4_a = -16}; e4 g4(e4 *x) { return *x; } // CHECK-LABEL: define{{.*}} i32 @_Z2g4P2e4 -// CHECK: load i32, ptr %x, align 4, !range [[RANGE_i32_m16_16:![^ ]*]] +// CHECK: load i32, ptr %x, align 4, !range [[RANGE_i32_m16_16:![0-9]+]], !noundef [[NOUNDEF]] enum e5 { e5_a = -16, e5_b = 16}; e5 g5(e5 *x) { return *x; } // CHECK-LABEL: define{{.*}} i32 @_Z2g5P2e5 -// CHECK: load i32, ptr %x, align 4, !range [[RANGE_i32_m32_32:![^ ]*]] +// CHECK: load i32, ptr %x, align 4, !range [[RANGE_i32_m32_32:![0-9]+]], !noundef [[NOUNDEF]] enum e6 { e6_a = -1 }; e6 g6(e6 *x) { return *x; } // CHECK-LABEL: define{{.*}} i32 @_Z2g6P2e6 -// CHECK: load i32, ptr %x, align 4, !range [[RANGE_i32_m1_1:![^ ]*]] +// CHECK: load i32, ptr %x, align 4, !range [[RANGE_i32_m1_1:![0-9]+]], !noundef [[NOUNDEF]] enum e7 { e7_a = -16, e7_b = 2}; e7 g7(e7 *x) { return *x; } // CHECK-LABEL: define{{.*}} i32 @_Z2g7P2e7 -// CHECK: load i32, ptr %x, align 4, !range [[RANGE_i32_m16_16]] +// CHECK: load i32, ptr %x, align 4, !range [[RANGE_i32_m16_16]], !noundef [[NOUNDEF]] enum e8 { e8_a = -17}; e8 g8(e8 *x) { return *x; } // CHECK-LABEL: define{{.*}} i32 @_Z2g8P2e8 -// CHECK: load i32, ptr %x, align 4, !range [[RANGE_i32_m32_32:![^ ]*]] +// CHECK: load i32, ptr %x, align 4, !range [[RANGE_i32_m32_32:![0-9]+]], !noundef [[NOUNDEF]] enum e9 { e9_a = 17}; e9 g9(e9 *x) { @@ -81,21 +81,21 @@ return *x; } // CHECK-LABEL: define{{.*}} i32 @_Z3g10P3e10 -// CHECK: load i32, ptr %x, align 4, !range [[RANGE_i32_m64_64:![^ ]*]] +// CHECK: load i32, ptr %x, align 4, !range [[RANGE_i32_m64_64:![0-9]+]], !noundef [[NOUNDEF]] enum e11 {e11_a = 4294967296 }; enum e11 g11(enum e11 *x) { return *x; } // CHECK-LABEL: define{{.*}} i64 @_Z3g11P3e11 -// CHECK: load i64, ptr %x, align {{[84]}}, !range [[RANGE_i64_0_2pow33:![^ ]*]] +// CHECK: load i64, ptr %x, align {{[84]}}, !range [[RANGE_i64_0_2pow33:![0-9]+]], !noundef [[NOUNDEF]] enum e12 {e12_a = 9223372036854775808U }; enum e12 g12(enum e12 *x) { return *x; } // CHECK-LABEL: define{{.*}} i64 @_Z3g12P3e12 -// CHECK: load i64, ptr %x, align {{[84]}} +// CHECK: load i64, ptr %x, align {{[84]}}, !noundef [[NOUNDEF]] // CHECK-NOT: range // CHECK: ret @@ -104,7 +104,7 @@ return *x; } // CHECK-LABEL: define{{.*}} signext i8 @_Z3g13P3e13 -// CHECK: load i8, ptr %x, align 1 +// CHECK: load i8, ptr %x, align 1, !noundef [[NOUNDEF]] // CHECK-NOT: range // CHECK: ret @@ -113,7 +113,7 @@ return *x; } // CHECK-LABEL: define{{.*}} i32 @_Z3g14P3e14 -// CHECK: load i32, ptr %x, align 4 +// CHECK: load i32, ptr %x, align 4, !noundef [[NOUNDEF]] // CHECK-NOT: range // CHECK: ret @@ -122,7 +122,7 @@ return *x; } // CHECK-LABEL: define{{.*}} i32 @_Z3g15P3e15 -// CHECK: load i32, ptr %x, align 4 +// CHECK: load i32, ptr %x, align 4, !noundef [[NOUNDEF]] // CHECK-NOT: range // CHECK: ret @@ -131,12 +131,12 @@ return *x; } // CHECK-LABEL: define{{.*}} i32 @_Z3g16P3e16 -// CHECK: load i32, ptr %x, align 4 +// CHECK: load i32, ptr %x, align 4, !noundef [[NOUNDEF]] // CHECK-NOT: range // CHECK: ret - // CHECK: [[RANGE_i8_0_2]] = !{i8 0, i8 2} +// CHECK: [[NOUNDEF]] = !{} // CHECK: [[RANGE_i32_0_32]] = !{i32 0, i32 32} // CHECK: [[RANGE_i32_m16_16]] = !{i32 -16, i32 16} // CHECK: [[RANGE_i32_m32_32]] = !{i32 -32, i32 32} diff --git 
a/clang/test/CodeGenCXX/pragma-followup_inner.cpp b/clang/test/CodeGenCXX/pragma-followup_inner.cpp --- a/clang/test/CodeGenCXX/pragma-followup_inner.cpp +++ b/clang/test/CodeGenCXX/pragma-followup_inner.cpp @@ -15,28 +15,29 @@ // CHECK-LABEL: define{{.*}} void @followup_inner -// CHECK: br label %for.cond1, !llvm.loop ![[INNERLOOP_3:[0-9]+]] -// CHECK: br label %for.cond, !llvm.loop ![[OUTERLOOP_9:[0-9]+]] +// CHECK: br label %for.cond1, !llvm.loop [[INNERLOOP_3:![0-9]+]] +// CHECK: br label %for.cond, !llvm.loop [[OUTERLOOP_9:![0-9]+]] -// CHECK-DAG: ![[ACCESSGROUP_2:[0-9]+]] = distinct !{} +// CHECK-DAG: [[NOUNDEF:![0-9]+]] = !{} +// CHECK-DAG: [[ACCESSGROUP_2:![0-9]+]] = distinct !{} -// CHECK-DAG: ![[INNERLOOP_3:[0-9]+]] = distinct !{![[INNERLOOP_3:[0-9]+]], ![[PARALLEL_ACCESSES_4:[0-9]+]], ![[DISTRIBUTE_5:[0-9]+]], ![[DISTRIBUTE_FOLLOWUP_6:[0-9]+]]} -// CHECK-DAG: ![[PARALLEL_ACCESSES_4:[0-9]+]] = !{!"llvm.loop.parallel_accesses", !2} -// CHECK-DAG: ![[DISTRIBUTE_5:[0-9]+]] = !{!"llvm.loop.distribute.enable", i1 true} -// CHECK-DAG: ![[DISTRIBUTE_FOLLOWUP_6:[0-9]+]] = !{!"llvm.loop.distribute.followup_all", ![[LOOP_7:[0-9]+]]} +// CHECK-DAG: [[INNERLOOP_3]] = distinct !{[[INNERLOOP_3]], [[PARALLEL_ACCESSES_4:![0-9]+]], [[DISTRIBUTE_5:![0-9]+]], [[DISTRIBUTE_FOLLOWUP_6:![0-9]+]]} +// CHECK-DAG: [[PARALLEL_ACCESSES_4]] = !{!"llvm.loop.parallel_accesses", [[ACCESSGROUP_2]]} +// CHECK-DAG: [[DISTRIBUTE_5]] = !{!"llvm.loop.distribute.enable", i1 true} +// CHECK-DAG: [[DISTRIBUTE_FOLLOWUP_6]] = !{!"llvm.loop.distribute.followup_all", [[LOOP_7:![0-9]+]]} -// CHECK-DAG: ![[LOOP_7:[0-9]+]] = distinct !{![[LOOP_7:[0-9]+]], ![[PARALLEL_ACCESSES_4:[0-9]+]], ![[VECTORIZE_8:[0-9]+]]} -// CHECK-DAG: ![[VECTORIZE_8:[0-9]+]] = !{!"llvm.loop.vectorize.enable", i1 true} +// CHECK-DAG: [[LOOP_7]] = distinct !{[[LOOP_7]], [[PARALLEL_ACCESSES_4]], [[VECTORIZE_8:![0-9]+]]} +// CHECK-DAG: [[VECTORIZE_8]] = !{!"llvm.loop.vectorize.enable", i1 true} -// CHECK-DAG: ![[OUTERLOOP_9:[0-9]+]] = distinct !{![[OUTERLOOP_9:[0-9]+]], [[MP:![0-9]+]], ![[UNROLLANDJAM_COUNT_10:[0-9]+]], ![[UNROLLANDJAM_FOLLOWUPINNER_11:[0-9]+]]} -// CHECK-DAG: ![[UNROLLANDJAM_COUNT_10:[0-9]+]] = !{!"llvm.loop.unroll_and_jam.count", i32 4} -// CHECK-DAG: ![[UNROLLANDJAM_FOLLOWUPINNER_11:[0-9]+]] = !{!"llvm.loop.unroll_and_jam.followup_inner", !13} +// CHECK-DAG: [[OUTERLOOP_9]] = distinct !{[[OUTERLOOP_9]], [[MP:![0-9]+]], [[UNROLLANDJAM_COUNT_10:![0-9]+]], [[UNROLLANDJAM_FOLLOWUPINNER_11:![0-9]+]]} +// CHECK-DAG: [[UNROLLANDJAM_COUNT_10]] = !{!"llvm.loop.unroll_and_jam.count", i32 4} +// CHECK-DAG: [[UNROLLANDJAM_FOLLOWUPINNER_11]] = !{!"llvm.loop.unroll_and_jam.followup_inner", [[LOOP_12:![0-9]+]]} -// CHECK-DAG: ![[LOOP_12:[0-9]+]] = distinct !{![[LOOP_12:[0-9]+]], ![[PARALLEL_ACCESSES_4:[0-9]+]], ![[ISVECTORIZED_13:[0-9]+]], ![[UNROLL_COUNT_13:[0-9]+]], ![[UNROLL_FOLLOWUP_14:[0-9]+]]} -// CHECK-DAG: ![[ISVECTORIZED_13:[0-9]+]] = !{!"llvm.loop.isvectorized"} -// CHECK-DAG: ![[UNROLL_COUNT_13:[0-9]+]] = !{!"llvm.loop.unroll.count", i32 4} -// CHECK-DAG: ![[UNROLL_FOLLOWUP_14:[0-9]+]] = !{!"llvm.loop.unroll.followup_all", ![[LOOP_15:[0-9]+]]} +// CHECK-DAG: [[LOOP_12]] = distinct !{[[LOOP_12]], [[PARALLEL_ACCESSES_4]], [[ISVECTORIZED_13:![0-9]+]], [[UNROLL_COUNT_13:![0-9]+]], [[UNROLL_FOLLOWUP_14:![0-9]+]]} +// CHECK-DAG: [[ISVECTORIZED_13]] = !{!"llvm.loop.isvectorized"} +// CHECK-DAG: [[UNROLL_COUNT_13]] = !{!"llvm.loop.unroll.count", i32 4} +// CHECK-DAG: [[UNROLL_FOLLOWUP_14:![0-9]+]] = !{!"llvm.loop.unroll.followup_all", 
[[LOOP_15:![0-9]+]]} -// CHECK-DAG: ![[LOOP_15:[0-9]+]] = distinct !{![[LOOP_15:[0-9]+]], ![[PARALLEL_ACCESSES_4:[0-9]+]], ![[ISVECTORIZED_13:[0-9]+]], ![[UNROLL_DISABLE_16:[0-9]+]], ![[PIPELINE_17:[0-9]+]]} -// CHECK-DAG: ![[UNROLL_DISABLE_16:[0-9]+]] = !{!"llvm.loop.unroll.disable"} -// CHECK-DAG: ![[PIPELINE_17:[0-9]+]] = !{!"llvm.loop.pipeline.initiationinterval", i32 10} +// CHECK-DAG: [[LOOP_15]] = distinct !{[[LOOP_15]], [[PARALLEL_ACCESSES_4]], [[ISVECTORIZED_13]], [[UNROLL_DISABLE_16:![0-9]+]], [[PIPELINE_17:![0-9]+]]} +// CHECK-DAG: [[UNROLL_DISABLE_16]] = !{!"llvm.loop.unroll.disable"} +// CHECK-DAG: [[PIPELINE_17]] = !{!"llvm.loop.pipeline.initiationinterval", i32 10} diff --git a/clang/test/OpenMP/cancel_codegen.cpp b/clang/test/OpenMP/cancel_codegen.cpp --- a/clang/test/OpenMP/cancel_codegen.cpp +++ b/clang/test/OpenMP/cancel_codegen.cpp @@ -116,20 +116,20 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB4:[0-9]+]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0 // CHECK1-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 [[TMP1]], i32 0 // CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_SECTIONS_UB_]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: switch i32 [[TMP7]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] // CHECK1-NEXT: ] @@ -144,7 +144,7 @@ // CHECK1: .omp.sections.exit: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -158,20 +158,20 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_3]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_4]], align 4 // CHECK1-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB4]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_SECTIONS_IL_4]], ptr [[DOTOMP_SECTIONS_LB_1]], ptr [[DOTOMP_SECTIONS_UB_2]], ptr [[DOTOMP_SECTIONS_ST_3]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_2]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp slt i32 [[TMP11]], 1 // CHECK1-NEXT: [[TMP13:%.*]] = select i1 [[TMP12]], i32 [[TMP11]], i32 1 // CHECK1-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_SECTIONS_UB_2]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_1]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_SECTIONS_IV_5]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND6:%.*]] // CHECK1: omp.inner.for.cond6: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_5]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_2]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] // CHECK1-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY8:%.*]], label [[OMP_INNER_FOR_END18:%.*]] // CHECK1: omp.inner.for.body8: -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_5]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_5]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_SECTIONS_EXIT15:%.*]] [ // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE9:%.*]] // CHECK1-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE12:%.*]] @@ -198,7 +198,7 @@ // CHECK1: .omp.sections.exit15: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]] // CHECK1: omp.inner.for.inc16: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_5]], align 4 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_5]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[INC17:%.*]] = add nsw i32 [[TMP22]], 1 // CHECK1-NEXT: store i32 [[INC17]], ptr [[DOTOMP_SECTIONS_IV_5]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND6]] @@ -207,51 +207,51 @@ // CHECK1-NEXT: br label [[CANCEL_CONT20:%.*]] // CHECK1: cancel.cont20: // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB5]], i32 [[TMP0]]) -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP23]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK1-NEXT: [[SUB22:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB22]], ptr [[DOTCAPTURE_EXPR_21]], align 4 // CHECK1-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP23:%.*]] = icmp slt i32 0, [[TMP25]] // CHECK1-NEXT: br i1 [[CMP23]], label [[OMP_PRECOND_THEN:%.*]], label 
[[OMP_PRECOND_END:%.*]] // CHECK1: omp.precond.then: // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP26]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB6:[0-9]+]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP25:%.*]] = icmp sgt i32 [[TMP27]], [[TMP28]] // CHECK1-NEXT: br i1 [[CMP25]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP29]], [[COND_TRUE]] ], [ [[TMP30]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP31]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND26:%.*]] // CHECK1: omp.inner.for.cond26: -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP27:%.*]] = icmp sle i32 [[TMP32]], [[TMP33]] // CHECK1-NEXT: br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END33:%.*]] // CHECK1: omp.inner.for.body28: -// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP34]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[I24]], align 4 -// CHECK1-NEXT: [[TMP35:%.*]] = load float, ptr @flag, align 4 +// CHECK1-NEXT: [[TMP35:%.*]] = load float, ptr @flag, align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP35]], 0.000000e+00 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -272,7 +272,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC31:%.*]] // CHECK1: omp.inner.for.inc31: -// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP38:%.*]] 
= load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP38]], 1 // CHECK1-NEXT: store i32 [[ADD32]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND26]] @@ -289,14 +289,14 @@ // CHECK1: cancel.cont35: // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP0]]) // CHECK1-NEXT: [[TMP39:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry.) -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP39]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP42:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP39]]) +// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP39]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP41:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP39]]) // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @.omp_outlined..2) // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @.omp_outlined..3) // CHECK1-NEXT: store i32 0, ptr [[R]], align 4 // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @.omp_outlined..4, ptr [[ARGC_ADDR]], ptr [[R]]) -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 -// CHECK1-NEXT: ret i32 [[TMP43]] +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: ret i32 [[TMP42]] // // // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. @@ -312,18 +312,18 @@ // CHECK1-NEXT: store ptr [[ARGC]], ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load float, ptr @flag, align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load float, ptr @flag, align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP2]], 0.000000e+00 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP4]], i32 1) // CHECK1-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0 // CHECK1-NEXT: br i1 [[TMP6]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]] // CHECK1: .cancel.exit: // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_cancel_barrier(ptr @[[GLOB2]], i32 [[TMP8]]) // CHECK1-NEXT: br label [[RETURN:%.*]] // CHECK1: .cancel.continue: @@ -331,27 +331,27 @@ // CHECK1: omp_if.else: // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[TMP10]] to i8 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP0]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP0]], align 8, !noundef 
[[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP11]], i64 0 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i64 0 // CHECK1-NEXT: store i8 [[CONV]], ptr [[ARRAYIDX1]], align 1 // CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_cancel_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP14]]) // CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK1-NEXT: br i1 [[TMP16]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]] // CHECK1: .cancel.exit2: // CHECK1-NEXT: br label [[RETURN]] // CHECK1: .cancel.continue3: -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP0]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP0]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds ptr, ptr [[TMP18]], i64 0 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[ARRAYIDX4]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[ARRAYIDX4]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i8, ptr [[TMP19]], i64 0 -// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX5]], align 1 +// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX5]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV6:%.*]] = sext i8 [[TMP20]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV6]], [[TMP17]] // CHECK1-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD]] to i8 @@ -375,35 +375,35 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META5:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP3]], ptr 
[[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP11]], i32 4) -// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 -// CHECK1-NEXT: br i1 [[TMP13]], label [[DOTCANCEL_EXIT_I:%.*]], label [[DOTCANCEL_CONTINUE_I:%.*]] +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP9]], i32 4) +// CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 +// CHECK1-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT_I:%.*]], label [[DOTCANCEL_CONTINUE_I:%.*]] // CHECK1: .cancel.exit.i: -// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK1: .cancel.continue.i: -// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK1: .omp_outlined..1.exit: -// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !15 // CHECK1-NEXT: ret i32 0 // // @@ -424,22 +424,22 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB4]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef 
[[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 0 // CHECK1-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 0 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_SECTIONS_UB_]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] // CHECK1-NEXT: ] @@ -454,7 +454,7 @@ // CHECK1: .omp.sections.exit: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -485,22 +485,22 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB4]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 1 // CHECK1-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 1 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_SECTIONS_UB_]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] // CHECK1-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE1:%.*]] @@ -524,7 +524,7 @@ // CHECK1: .omp.sections.exit: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1 // CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -563,70 +563,70 @@ // CHECK1-NEXT: store ptr [[R]], ptr [[R_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[R_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK1-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1: omp.precond.then: // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store i32 0, ptr [[R3]], align 4 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB6]], i32 [[TMP7]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef 
[[NOUNDEF3]] // CHECK1-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]] // CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[I4]], align 4 // CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP16]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP17]], i32 2) // CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK1-NEXT: br i1 [[TMP19]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]] // CHECK1: .cancel.exit: // CHECK1-NEXT: br label [[CANCEL_EXIT:%.*]] // CHECK1: .cancel.continue: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[I4]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[R3]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[I4]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[R3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP21]], [[TMP20]] // CHECK1-NEXT: store i32 [[ADD7]], ptr [[R3]], align 4 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP22]], 1 // CHECK1-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -634,32 +634,32 @@ // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // 
CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4 +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB6]], i32 [[TMP24]]) // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK1-NEXT: store ptr [[R3]], ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP30:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB7:[0-9]+]], i32 [[TMP28]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP30]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB7:[0-9]+]], i32 [[TMP27]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP28]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[R3]], align 4 -// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP31]], [[TMP32]] +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[R3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP29]], [[TMP30]] // CHECK1-NEXT: store i32 [[ADD9]], ptr [[TMP1]], align 4 -// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB7]], i32 [[TMP28]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB7]], i32 [[TMP27]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: cancel.exit: -// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB6]], i32 [[TMP34]]) +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB6]], i32 [[TMP32]]) // CHECK1-NEXT: br label [[CANCEL_CONT:%.*]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[R3]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = atomicrmw add ptr [[TMP1]], i32 [[TMP35]] monotonic, align 4 +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[R3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP34:%.*]] = atomicrmw add ptr [[TMP1]], i32 [[TMP33]] monotonic, align 4 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: br label [[OMP_PRECOND_END]] @@ -677,15 +677,15 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr 
[[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK1-NEXT: ret void // // @@ -837,52 +837,52 @@ // CHECK3: omp_section_loop.after19: // CHECK3-NEXT: br label [[OMP_SECTION_LOOP_AFTER19SECTIONS_FINI:%.*]] // CHECK3: omp_section_loop.after19sections.fini: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK3-NEXT: store i32 [[TMP20]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP21]], 0 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK3-NEXT: [[SUB35:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK3-NEXT: store i32 [[SUB35]], ptr [[DOTCAPTURE_EXPR_34]], align 4 // CHECK3-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP22]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK3: omp.precond.then: // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_34]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP23]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM37:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB6:[0-9]+]]) // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB4:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM37]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr 
[[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_34]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP38:%.*]] = icmp sgt i32 [[TMP24]], [[TMP25]] // CHECK3-NEXT: br i1 [[CMP38]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: -// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_34]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP26]], [[COND_TRUE]] ], [ [[TMP27]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP28]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP39:%.*]] = icmp sle i32 [[TMP29]], [[TMP30]] // CHECK3-NEXT: br i1 [[CMP39]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP31]], 1 // CHECK3-NEXT: [[ADD40:%.*]] = add nsw i32 0, [[MUL]] // CHECK3-NEXT: store i32 [[ADD40]], ptr [[I36]], align 4 -// CHECK3-NEXT: [[TMP32:%.*]] = load float, ptr @flag, align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = load float, ptr @flag, align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TOBOOL41:%.*]] = fcmp une float [[TMP32]], 0.000000e+00 // CHECK3-NEXT: br i1 [[TOBOOL41]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -907,7 +907,7 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP35]], 1 // CHECK3-NEXT: store i32 [[ADD43]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -928,15 +928,15 @@ // CHECK3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[OMP_GLOBAL_THREAD_NUM46]]) // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM47:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB14:[0-9]+]]) // CHECK3-NEXT: [[TMP36:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM47]], i32 1, i64 40, i64 1, ptr @.omp_task_entry.) 
-// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP36]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP36]], i32 0, i32 0 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM48:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB14]]) -// CHECK3-NEXT: [[TMP39:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM48]], ptr [[TMP36]]) +// CHECK3-NEXT: [[TMP38:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM48]], ptr [[TMP36]]) // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @.omp_outlined..1) // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @.omp_outlined..2) // CHECK3-NEXT: store i32 0, ptr [[R]], align 4 // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @.omp_outlined..3, ptr [[ARGC_ADDR]], ptr [[R]]) -// CHECK3-NEXT: [[TMP40:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 -// CHECK3-NEXT: ret i32 [[TMP40]] +// CHECK3-NEXT: [[TMP39:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: ret i32 [[TMP39]] // // // CHECK3-LABEL: define {{[^@]+}}@main..omp_par @@ -952,17 +952,17 @@ // CHECK3-NEXT: [[TID:%.*]] = load i32, ptr [[TID_ADDR_LOCAL]], align 4 // CHECK3-NEXT: br label [[OMP_PAR_REGION:%.*]] // CHECK3: omp.par.region: -// CHECK3-NEXT: [[TMP2:%.*]] = load float, ptr @flag, align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load float, ptr @flag, align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP2]], 0.000000e+00 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[TMP14:%.*]], label [[TMP3:%.*]] // CHECK3: 3: // CHECK3-NEXT: br label [[TMP4:%.*]] // CHECK3: 4: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[LOADGEP_ARGC_ADDR]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[LOADGEP_ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[TMP5]] to i8 -// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[LOADGEP_ARGV_ADDR]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[LOADGEP_ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP6]], i64 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 0 // CHECK3-NEXT: store i8 [[CONV]], ptr [[ARRAYIDX3]], align 1 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) @@ -972,12 +972,12 @@ // CHECK3: .cncl5: // CHECK3-NEXT: br label [[OMP_PAR_OUTLINED_EXIT_EXITSTUB:%.*]] // CHECK3: .cont: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[LOADGEP_ARGC_ADDR]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[LOADGEP_ARGV_ADDR]], align 8 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[LOADGEP_ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[LOADGEP_ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds ptr, ptr [[TMP11]], i64 0 -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[ARRAYIDX6]], align 8 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[ARRAYIDX6]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i64 0 -// CHECK3-NEXT: 
[[TMP13:%.*]] = load i8, ptr [[ARRAYIDX7]], align 1 +// CHECK3-NEXT: [[TMP13:%.*]] = load i8, ptr [[ARRAYIDX7]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV8:%.*]] = sext i8 [[TMP13]] to i32 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV8]], [[TMP10]] // CHECK3-NEXT: [[CONV9:%.*]] = trunc i32 [[ADD]] to i8 @@ -1016,35 +1016,35 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META5:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB12:[0-9]+]]) -// CHECK3-NEXT: [[TMP11:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], i32 4) -// CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 -// CHECK3-NEXT: br i1 [[TMP12]], label [[DOTCANCEL_EXIT_I:%.*]], label 
[[DOTCANCEL_CONTINUE_I:%.*]] +// CHECK3-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], i32 4) +// CHECK3-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 +// CHECK3-NEXT: br i1 [[TMP10]], label [[DOTCANCEL_EXIT_I:%.*]], label [[DOTCANCEL_CONTINUE_I:%.*]] // CHECK3: .cancel.exit.i: -// CHECK3-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !14 +// CHECK3-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !15 // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__EXIT:%.*]] // CHECK3: .cancel.continue.i: -// CHECK3-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !14 +// CHECK3-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !15 // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__EXIT]] // CHECK3: .omp_outlined..exit: -// CHECK3-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !14 +// CHECK3-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !15 // CHECK3-NEXT: ret i32 0 // // @@ -1066,20 +1066,20 @@ // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB17:[0-9]+]]) // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB15:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP1:%.*]] = icmp slt i32 [[TMP0]], 0 // CHECK3-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 [[TMP0]], i32 0 // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_SECTIONS_UB_]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: switch i32 [[TMP6]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ // CHECK3-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] // CHECK3-NEXT: ] @@ -1095,7 +1095,7 @@ // CHECK3: .omp.sections.exit: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK3-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // 
CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -1129,20 +1129,20 @@ // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21:[0-9]+]]) // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB15]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP1:%.*]] = icmp slt i32 [[TMP0]], 1 // CHECK3-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 [[TMP0]], i32 1 // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_SECTIONS_UB_]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP4]], [[TMP5]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: switch i32 [[TMP6]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ // CHECK3-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] // CHECK3-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE2:%.*]] @@ -1170,7 +1170,7 @@ // CHECK3: .omp.sections.exit: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK3-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -1211,49 +1211,49 @@ // CHECK3-NEXT: store ptr [[R]], ptr [[R_ADDR]], align 8 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[R_ADDR]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK3-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK3-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK3: omp.precond.then: // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: store i32 0, ptr [[R3]], align 4 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB25:[0-9]+]]) // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP8]], [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] // CHECK3-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP13]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[I4]], align 4 @@ -1264,15 +1264,15 @@ // CHECK3: .cancel.exit: // CHECK3-NEXT: br label [[CANCEL_EXIT:%.*]] // CHECK3: .cancel.continue: -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I4]], align 4 
-// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[R3]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I4]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[R3]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP17]], [[TMP16]] // CHECK3-NEXT: store i32 [[ADD8]], ptr [[R3]], align 4 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP18]], 1 // CHECK3-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -1284,15 +1284,15 @@ // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK3-NEXT: store ptr [[R3]], ptr [[TMP19]], align 8 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM12:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB29]]) -// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB30:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM12]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK3-NEXT: switch i32 [[TMP22]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK3-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB30:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM12]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK3-NEXT: switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK3-NEXT: ] // CHECK3: .omp.reduction.case1: -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP1]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[R3]], align 4 -// CHECK3-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP23]], [[TMP24]] +// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[R3]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP21]], [[TMP22]] // CHECK3-NEXT: store i32 [[ADD13]], ptr [[TMP1]], align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB30]], i32 [[OMP_GLOBAL_THREAD_NUM12]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] @@ -1301,8 +1301,8 @@ // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB4]], i32 [[OMP_GLOBAL_THREAD_NUM10]]) // CHECK3-NEXT: br label [[CANCEL_CONT:%.*]] // CHECK3: .omp.reduction.case2: -// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[R3]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = atomicrmw add ptr [[TMP1]], i32 [[TMP25]] monotonic, align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[R3]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP24:%.*]] = atomicrmw add ptr [[TMP1]], i32 [[TMP23]] monotonic, align 4 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.default: // CHECK3-NEXT: br label [[OMP_PRECOND_END]] @@ -1320,14 +1320,14 @@ // CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// 
CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK3-NEXT: ret void // diff --git a/clang/test/OpenMP/cancellation_point_codegen.cpp b/clang/test/OpenMP/cancellation_point_codegen.cpp --- a/clang/test/OpenMP/cancellation_point_codegen.cpp +++ b/clang/test/OpenMP/cancellation_point_codegen.cpp @@ -118,20 +118,20 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3:[0-9]+]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0 // CHECK1-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 [[TMP1]], i32 0 // CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_SECTIONS_UB_]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: switch i32 [[TMP7]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] // CHECK1-NEXT: ] @@ -152,7 +152,7 @@ // CHECK1: .omp.sections.exit: // CHECK1-NEXT: br 
label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP12]], 1 // CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -166,20 +166,20 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_5]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_6]], align 4 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_SECTIONS_IL_6]], ptr [[DOTOMP_SECTIONS_LB_3]], ptr [[DOTOMP_SECTIONS_UB_4]], ptr [[DOTOMP_SECTIONS_ST_5]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_4]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_4]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp slt i32 [[TMP13]], 1 // CHECK1-NEXT: [[TMP15:%.*]] = select i1 [[TMP14]], i32 [[TMP13]], i32 1 // CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_SECTIONS_UB_4]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_3]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_SECTIONS_IV_7]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] // CHECK1: omp.inner.for.cond8: -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_7]], align 4 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_4]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_4]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] // CHECK1-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END22:%.*]] // CHECK1: omp.inner.for.body10: -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_7]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_7]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: switch i32 [[TMP19]], label [[DOTOMP_SECTIONS_EXIT19:%.*]] [ // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE11:%.*]] // CHECK1-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE14:%.*]] @@ -212,7 +212,7 @@ // CHECK1: .omp.sections.exit19: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC20:%.*]] // CHECK1: omp.inner.for.inc20: -// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_7]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_7]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[INC21:%.*]] = add nsw i32 [[TMP26]], 1 // CHECK1-NEXT: store i32 [[INC21]], ptr [[DOTOMP_SECTIONS_IV_7]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND8]] @@ -221,47 +221,47 @@ // CHECK1-NEXT: br label [[CANCEL_CONT24:%.*]] // CHECK1: cancel.cont24: // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[TMP0]]) -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP27]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, 
!noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK1-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB26]], ptr [[DOTCAPTURE_EXPR_25]], align 4 // CHECK1-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP27:%.*]] = icmp slt i32 0, [[TMP29]] // CHECK1-NEXT: br i1 [[CMP27]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1: omp.precond.then: // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP30]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB5:[0-9]+]], i32 [[TMP0]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP29:%.*]] = icmp sgt i32 [[TMP31]], [[TMP32]] // CHECK1-NEXT: br i1 [[CMP29]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: -// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP33]], [[COND_TRUE]] ], [ [[TMP34]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP35]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND30:%.*]] // CHECK1: omp.inner.for.cond30: -// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP31:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]] // CHECK1-NEXT: br i1 [[CMP31]], label [[OMP_INNER_FOR_BODY32:%.*]], label [[OMP_INNER_FOR_END39:%.*]] // CHECK1: omp.inner.for.body32: -// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP38]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK1-NEXT: 
store i32 [[ADD]], ptr [[I28]], align 4 @@ -284,7 +284,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC37:%.*]] // CHECK1: omp.inner.for.inc37: -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD38:%.*]] = add nsw i32 [[TMP43]], 1 // CHECK1-NEXT: store i32 [[ADD38]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND30]] @@ -301,16 +301,16 @@ // CHECK1: cancel.cont41: // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP0]]) // CHECK1-NEXT: [[TMP44:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry.) -// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP44]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP47:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP44]]) -// CHECK1-NEXT: [[TMP48:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..3) -// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP48]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP51:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP48]]) +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP44]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP46:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP44]]) +// CHECK1-NEXT: [[TMP47:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..3) +// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP47]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP49:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP47]]) // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @.omp_outlined..4) // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 0, ptr @.omp_outlined..5) // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..6, ptr [[ARGC_ADDR]]) -// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 -// CHECK1-NEXT: ret i32 [[TMP52]] +// CHECK1-NEXT: [[TMP50:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: ret i32 [[TMP50]] // // // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. 
@@ -327,7 +327,7 @@ // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_cancellationpoint(ptr @[[GLOB1]], i32 [[TMP3]], i32 1) // CHECK1-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0 // CHECK1-NEXT: br i1 [[TMP5]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]] @@ -342,11 +342,11 @@ // CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_cancel_barrier(ptr @[[GLOB2]], i32 [[TMP3]]) // CHECK1-NEXT: br label [[RETURN]] // CHECK1: .cancel.continue2: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[TMP10]] to i8 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP0]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP0]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP11]], i64 0 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i64 0 // CHECK1-NEXT: store i8 [[CONV]], ptr [[ARRAYIDX3]], align 1 // CHECK1-NEXT: br label [[RETURN]] @@ -368,42 +368,42 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META5:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: [[TMP12:%.*]] = call i32 
@__kmpc_cancellationpoint(ptr @[[GLOB1]], i32 [[TMP11]], i32 4) -// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 -// CHECK1-NEXT: br i1 [[TMP13]], label [[DOTCANCEL_EXIT_I:%.*]], label [[DOTCANCEL_CONTINUE_I:%.*]] +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(ptr @[[GLOB1]], i32 [[TMP9]], i32 4) +// CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 +// CHECK1-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT_I:%.*]], label [[DOTCANCEL_CONTINUE_I:%.*]] // CHECK1: .cancel.exit.i: -// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK1: .cancel.continue.i: -// CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP11]], i32 4) -// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 -// CHECK1-NEXT: br i1 [[TMP15]], label [[DOTCANCEL_EXIT1_I:%.*]], label [[DOTCANCEL_CONTINUE2_I:%.*]] +// CHECK1-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP9]], i32 4) +// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 +// CHECK1-NEXT: br i1 [[TMP13]], label [[DOTCANCEL_EXIT1_I:%.*]], label [[DOTCANCEL_CONTINUE2_I:%.*]] // CHECK1: .cancel.exit1.i: -// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK1: .cancel.continue2.i: -// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK1: .omp_outlined..1.exit: -// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !15 // CHECK1-NEXT: ret i32 0 // // @@ -421,35 +421,35 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 
4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !24 -// CHECK1-NEXT: [[TMP12:%.*]] = call i32 @__kmpc_cancellationpoint(ptr @[[GLOB1]], i32 [[TMP11]], i32 4) -// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 -// CHECK1-NEXT: br i1 [[TMP13]], label [[DOTCANCEL_EXIT_I:%.*]], label [[DOTCANCEL_CONTINUE_I:%.*]] +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(ptr @[[GLOB1]], i32 [[TMP9]], i32 4) +// CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 +// CHECK1-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT_I:%.*]], label [[DOTCANCEL_CONTINUE_I:%.*]] // CHECK1: .cancel.exit.i: -// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !24 +// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !25 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__2_EXIT:%.*]] // CHECK1: 
.cancel.continue.i: -// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !24 +// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !25 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__2_EXIT]] // CHECK1: .omp_outlined..2.exit: -// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !24 +// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !25 // CHECK1-NEXT: ret i32 0 // // @@ -470,22 +470,22 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 0 // CHECK1-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 0 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_SECTIONS_UB_]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] // CHECK1-NEXT: ] @@ -506,7 +506,7 @@ // CHECK1: .omp.sections.exit: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1 // CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -537,22 +537,22 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], 
align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 1 // CHECK1-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 1 // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_SECTIONS_UB_]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: switch i32 [[TMP8]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] // CHECK1-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE3:%.*]] @@ -582,7 +582,7 @@ // CHECK1: .omp.sections.exit: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP15]], 1 // CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -616,54 +616,54 @@ // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[ARGC]], ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK1-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]] // CHECK1-NEXT: br i1 [[CMP]], 
label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1: omp.precond.then: // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB5]], i32 [[TMP6]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]] // CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP14]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[I3]], align 4 // CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_cancellationpoint(ptr @[[GLOB1]], i32 [[TMP16]], i32 2) // 
CHECK1-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK1-NEXT: br i1 [[TMP18]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]] @@ -671,7 +671,7 @@ // CHECK1-NEXT: br label [[CANCEL_EXIT:%.*]] // CHECK1: .cancel.continue: // CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP21:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP20]], i32 2) // CHECK1-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 // CHECK1-NEXT: br i1 [[TMP22]], label [[DOTCANCEL_EXIT6:%.*]], label [[DOTCANCEL_CONTINUE7:%.*]] @@ -682,7 +682,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK1-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -690,12 +690,12 @@ // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4 +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB5]], i32 [[TMP25]]) // CHECK1-NEXT: br label [[OMP_PRECOND_END]] // CHECK1: cancel.exit: // CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB5]], i32 [[TMP27]]) // CHECK1-NEXT: br label [[CANCEL_CONT:%.*]] // CHECK1: omp.precond.end: diff --git a/clang/test/OpenMP/distribute_codegen.cpp b/clang/test/OpenMP/distribute_codegen.cpp --- a/clang/test/OpenMP/distribute_codegen.cpp +++ b/clang/test/OpenMP/distribute_codegen.cpp @@ -131,58 +131,58 @@ // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF8:![0-9]+]] +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP8]], align 
8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store ptr null, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = 
getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 4571424, ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l56.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK1-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, 
i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 4571424, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l56.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK1-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l56(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR2:[0-9]+]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -236,51 +236,51 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 
4 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64 // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM2]] -// CHECK1-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64 // CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM5]] -// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]] -// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64 // CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM8]] // CHECK1-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4 @@ -288,7 +288,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK1-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -314,58 +314,58 @@ // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: 
[[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store ptr null, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK1-NEXT: 
store ptr [[TMP2]], ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.2, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 4571424, ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK1-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store 
ptr [[TMP16]], ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.2, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 4571424, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK1-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l68(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR2]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -419,51 +419,51 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 
4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK1-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64 // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM2]] -// CHECK1-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64 // CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM5]] -// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]] -// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64 // CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM8]] // CHECK1-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4 @@ -471,7 +471,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: 
[[ADD:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -497,58 +497,58 @@ // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store ptr null, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.5, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 16908289, ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l80.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK1-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.5, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 16908289, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l80.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK1-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l80(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR2]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -602,78 +602,78 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5) // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK1: omp.dispatch.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // 
CHECK1: cond.false: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127 // CHECK1-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !10 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group !10 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !10 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group !10 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], 
!noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM3]] -// CHECK1-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]] -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group !10 -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64 // CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM6]] -// CHECK1-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]] -// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group !10 -// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64 // CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM9]] -// CHECK1-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1 -// CHECK1-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK1: omp.dispatch.inc: -// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]] // CHECK1-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = 
load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]] // CHECK1-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] @@ -694,52 +694,52 @@ // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store i8 0, ptr [[A]], align 1 -// CHECK1-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1 +// CHECK1-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i8 [[TMP0]], ptr [[A_CASTED]], align 1 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: store i8 [[TMP9]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP10]] to i32 -// CHECK1-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV2]] -// CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: store i8 [[TMP7]], ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP8]] to i32 +// CHECK1-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] +// CHECK1-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1 // CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 -// CHECK1-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1 -// CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[ADD5]] to i64 +// CHECK1-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK1-NEXT: store i32 [[SUB3]], ptr 
[[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 +// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[ADD4]] to i64 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP13]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP14]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.8, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.9, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 [[TMP12]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l92.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 -// CHECK1-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP11]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP12]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.8, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.9, ptr [[TMP16]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 [[TMP10]], ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l92.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 +// CHECK1-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l92(i64 [[TMP1]]) #[[ATTR2]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -776,9 +776,9 @@ // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 1 +// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 1, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i8 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32 // CHECK1-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK1-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -786,46 +786,46 @@ // CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK1-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i8 [[TMP3]], ptr [[I]], align 1 -// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CONV4:%.*]] = sext i8 [[TMP4]] to i32 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV4]], 10 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1: omp.precond.then: // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP7]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// 
CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]] // CHECK1-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK1-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP15:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP15:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CONV8:%.*]] = sext i8 [[TMP15]] to i32 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 // CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[MUL]] // CHECK1-NEXT: [[CONV10:%.*]] = trunc i32 [[ADD9]] to i8 @@ -834,7 +834,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP17]], 1 // CHECK1-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -842,7 +842,7 @@ // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP19]]) // CHECK1-NEXT: br label [[OMP_PRECOND_END]] // CHECK1: omp.precond.end: @@ -866,39 +866,39 @@ // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 // 
CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.11, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l108.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK1-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.11, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 100, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l108.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l108(i64 [[TMP1]]) #[[ATTR2]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -936,59 +936,59 @@ // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]]) // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK1: omp.dispatch.cond: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // 
CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !13 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP14]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK1: omp.dispatch.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load 
i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] // CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] // CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] @@ -1019,58 +1019,58 @@ // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF9:![0-9]+]] +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr 
inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP23]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr null, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP30]], align 4 -// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr 
[[TMP31]], align 4 -// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP32]], align 4 -// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP33]], align 4 -// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 4571424, ptr [[TMP34]], align 8 -// CHECK3-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l56.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK3-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP22]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 4 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP24]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 4571424, ptr [[TMP26]], align 8 +// CHECK3-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l56.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK3-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l56(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR2:[0-9]+]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1124,55 +1124,55 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// 
CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 [[TMP13]] -// CHECK3-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP16]] -// CHECK3-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL3:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, 
ptr [[TMP3]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[TMP19]] -// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP20]] -// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP22]] // CHECK3-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK3-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -1198,58 +1198,58 @@ // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 4 -// 
CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP23]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr null, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4 -// CHECK3-NEXT: 
[[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes.2, ptr [[TMP30]], align 4 -// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP31]], align 4 -// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP32]], align 4 -// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP33]], align 4 -// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 4571424, ptr [[TMP34]], align 8 -// CHECK3-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK3-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.2, ptr [[TMP22]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP23]], align 4 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP24]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 4571424, ptr [[TMP26]], align 8 +// CHECK3-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK3-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // 
CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l68(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR2]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1303,55 +1303,55 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK3-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 [[TMP13]] -// CHECK3-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef 
[[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP16]] -// CHECK3-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL3:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[TMP19]] -// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP20]] -// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP22]] // CHECK3-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -1377,58 +1377,58 @@ // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 4 -// 
CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP23]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr null, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes.5, ptr [[TMP30]], align 4 -// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP31]], align 4 -// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP32]], align 4 -// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP33]], align 4 -// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 16908289, ptr [[TMP34]], align 8 -// CHECK3-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l80.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK3-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.5, ptr [[TMP22]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP23]], align 4 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP24]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 16908289, ptr [[TMP26]], align 8 +// CHECK3-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l80.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK3-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l80(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR2]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1482,74 +1482,74 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5) // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK3: omp.dispatch.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !11 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // 
CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127 // CHECK3-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !11 -// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group !11 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP15]] -// CHECK3-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !11 -// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group !11 -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 +// CHECK3-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 [[TMP18]] -// CHECK3-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group !11 +// CHECK3-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL4:%.*]] = fmul float [[TMP16]], [[TMP19]] -// CHECK3-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group !11 -// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 +// CHECK3-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP21]] -// CHECK3-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group !11 +// CHECK3-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL6:%.*]] = fmul float [[MUL4]], [[TMP22]] -// CHECK3-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group !11 -// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 +// CHECK3-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[TMP24]] -// CHECK3-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group !11 +// CHECK3-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP12]] 
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD8:%.*]] = add i32 [[TMP25]], 1 -// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK3: omp.dispatch.inc: -// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD9:%.*]] = add i32 [[TMP26]], [[TMP27]] // CHECK3-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD10:%.*]] = add i32 [[TMP28]], [[TMP29]] // CHECK3-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] @@ -1570,52 +1570,52 @@ // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i8 0, ptr [[A]], align 1 -// CHECK3-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1 +// CHECK3-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i8 [[TMP0]], ptr [[A_CASTED]], align 1 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP2]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP9:%.*]] = load i8, ptr [[A]], align 1 -// CHECK3-NEXT: store i8 [[TMP9]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK3-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK3-NEXT: [[CONV2:%.*]] = sext i8 [[TMP10]] to i32 -// CHECK3-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV2]] -// CHECK3-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 +// CHECK3-NEXT: 
[[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP7:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: store i8 [[TMP7]], ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP8]] to i32 +// CHECK3-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] +// CHECK3-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 +// CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1 // CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 -// CHECK3-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1 -// CHECK3-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: [[TMP12:%.*]] = zext i32 [[ADD5]] to i64 +// CHECK3-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK3-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 +// CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[ADD4]] to i64 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes.8, ptr [[TMP17]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes.9, ptr [[TMP18]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP19]], align 4 -// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP20]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 [[TMP12]], ptr [[TMP21]], align 8 -// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l92.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 -// CHECK3-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.8, ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.9, ptr [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 [[TMP10]], ptr [[TMP19]], align 8 +// CHECK3-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l92.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 +// CHECK3-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l92(i32 [[TMP1]]) #[[ATTR2]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1652,9 +1652,9 @@ // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 1 +// CHECK3-NEXT: [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i8 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32 // CHECK3-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK3-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -1662,46 +1662,46 @@ // CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK3-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK3-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP3:%.*]] = load 
i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i8 [[TMP3]], ptr [[I]], align 1 -// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV4:%.*]] = sext i8 [[TMP4]] to i32 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV4]], 10 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK3: omp.precond.then: // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP7]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]] // CHECK3-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK3-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP15:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP15:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV8:%.*]] = sext i8 [[TMP15]] to i32 
-// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 // CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[MUL]] // CHECK3-NEXT: [[CONV10:%.*]] = trunc i32 [[ADD9]] to i8 @@ -1710,7 +1710,7 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP17]], 1 // CHECK3-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -1718,7 +1718,7 @@ // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP19]]) // CHECK3-NEXT: br label [[OMP_PRECOND_END]] // CHECK3: omp.precond.end: @@ -1742,39 +1742,39 @@ // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP2]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK3-NEXT: 
[[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes.11, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 100, ptr [[TMP17]], align 8 -// CHECK3-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l108.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK3-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.11, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 100, ptr [[TMP15]], align 8 +// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l108.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 
[[TMP16]], 0 +// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l108(i32 [[TMP1]]) #[[ATTR2]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1812,59 +1812,59 @@ // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]]) // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK3: omp.dispatch.cond: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !14 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 +// CHECK3-NEXT: 
[[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !14 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK3: omp.dispatch.inc: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] // CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] // CHECK3-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] @@ -1926,51 +1926,51 @@ // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF9:![0-9]+]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // 
CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] // CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK17-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8 -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK17-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM]] -// CHECK17-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK17-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8 -// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK17-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64 // CHECK17-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM2]] -// CHECK17-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +// CHECK17-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK17-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8 -// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK17-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64 // CHECK17-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM5]] -// CHECK17-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4 +// CHECK17-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]] -// CHECK17-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8 -// CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK17-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef 
[[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64 // CHECK17-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM8]] // CHECK17-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4 @@ -1978,7 +1978,7 @@ // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK17-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -2035,51 +2035,51 @@ // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK17-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK17-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8 -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK17-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // 
CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM]] -// CHECK17-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK17-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8 -// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK17-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64 // CHECK17-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM2]] -// CHECK17-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +// CHECK17-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK17-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8 -// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK17-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64 // CHECK17-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM5]] -// CHECK17-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4 +// CHECK17-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]] -// CHECK17-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8 -// CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK17-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64 // CHECK17-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM8]] // CHECK17-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4 @@ -2087,7 +2087,7 @@ // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -2144,78 +2144,78 @@ // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5) // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK17: omp.dispatch.cond: -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: 
[[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK17: omp.dispatch.body: // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !11 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]] // CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127 // CHECK17-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !11 -// CHECK17-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group !11 -// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 +// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK17-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM]] -// CHECK17-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !11 -// CHECK17-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group !11 -// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 +// CHECK17-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP12]], 
!noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64 // CHECK17-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM3]] -// CHECK17-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group !11 +// CHECK17-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]] -// CHECK17-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group !11 -// CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 +// CHECK17-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64 // CHECK17-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM6]] -// CHECK17-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !11 +// CHECK17-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]] -// CHECK17-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group !11 -// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 +// CHECK17-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64 // CHECK17-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM9]] -// CHECK17-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group !11 +// CHECK17-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1 -// CHECK17-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK17: omp.dispatch.inc: -// CHECK17-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK17-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP27:%.*]] = load i32, 
ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]] // CHECK17-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK17-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]] // CHECK17-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]] @@ -2253,9 +2253,9 @@ // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 1 +// CHECK17-NEXT: [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 1, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i8 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK17-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK17-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32 // CHECK17-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK17-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -2263,46 +2263,46 @@ // CHECK17-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK17-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK17-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK17-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK17-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i8 [[TMP3]], ptr [[I]], align 1 -// CHECK17-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK17-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CONV4:%.*]] = sext i8 [[TMP4]] to i32 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV4]], 10 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK17: omp.precond.then: // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP7]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP6:%.*]] = icmp sgt i32 
[[TMP8]], [[TMP9]] // CHECK17-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK17-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP15:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK17-NEXT: [[TMP15:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CONV8:%.*]] = sext i8 [[TMP15]] to i32 -// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 // CHECK17-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[MUL]] // CHECK17-NEXT: [[CONV10:%.*]] = trunc i32 [[ADD9]] to i8 @@ -2311,7 +2311,7 @@ // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP17]], 1 // CHECK17-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -2319,7 +2319,7 @@ // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4 +// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP19]]) // CHECK17-NEXT: br label [[OMP_PRECOND_END]] // CHECK17: omp.precond.end: @@ -2356,59 +2356,59 @@ // CHECK17-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK17-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2 +// CHECK17-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] 
to i32 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]]) // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK17: omp.dispatch.cond: -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK17: omp.dispatch.body: // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !14 +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !14 +// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 +// CHECK17-NEXT: 
[[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK17-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK17: omp.dispatch.inc: -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] // CHECK17-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] // CHECK17-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]] @@ -2463,55 +2463,55 @@ // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF10:![0-9]+]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: 
[[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] // CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK19-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4 -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK19-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 [[TMP13]] -// CHECK19-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK19-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4 -// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK19-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP16]] -// CHECK19-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK19-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL3:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK19-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4 -// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK19-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[TMP19]] -// CHECK19-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP20]] -// CHECK19-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4 -// CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK19-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP22]] // CHECK19-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK19-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -2568,55 +2568,55 @@ // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], 
align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK19-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK19-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4 -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK19-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 [[TMP13]] -// CHECK19-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK19-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4 -// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK19-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP16]] -// CHECK19-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK19-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: 
[[MUL3:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK19-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4 -// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK19-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[TMP19]] -// CHECK19-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP20]] -// CHECK19-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4 -// CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK19-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP22]] // CHECK19-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -2673,74 +2673,74 @@ // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5) // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK19: omp.dispatch.cond: -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] 
+// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK19: omp.dispatch.body: // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !12 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]] // CHECK19-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127 // CHECK19-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !12 -// CHECK19-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group !12 -// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !12 +// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK19-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP15]] -// CHECK19-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !12 -// CHECK19-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group !12 -// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !12 +// CHECK19-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 [[TMP18]] -// CHECK19-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group !12 +// CHECK19-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL4:%.*]] = fmul float [[TMP16]], [[TMP19]] -// CHECK19-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group !12 -// CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !12 +// CHECK19-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX5:%.*]] = getelementptr 
inbounds float, ptr [[TMP20]], i32 [[TMP21]] -// CHECK19-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group !12 +// CHECK19-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL6:%.*]] = fmul float [[MUL4]], [[TMP22]] -// CHECK19-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group !12 -// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !12 +// CHECK19-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[TMP24]] -// CHECK19-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group !12 +// CHECK19-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD8:%.*]] = add i32 [[TMP25]], 1 -// CHECK19-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK19: omp.dispatch.inc: -// CHECK19-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK19-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD9:%.*]] = add i32 [[TMP26]], [[TMP27]] // CHECK19-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK19-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD10:%.*]] = add i32 [[TMP28]], [[TMP29]] // CHECK19-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]] @@ -2778,9 +2778,9 @@ // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 1 +// CHECK19-NEXT: [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 1, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i8 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK19-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK19-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef 
[[NOUNDEF10]] // CHECK19-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32 // CHECK19-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK19-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -2788,46 +2788,46 @@ // CHECK19-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK19-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK19-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i8 [[TMP3]], ptr [[I]], align 1 -// CHECK19-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK19-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CONV4:%.*]] = sext i8 [[TMP4]] to i32 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV4]], 10 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK19: omp.precond.then: // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP7]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]] // CHECK19-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, 
!noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK19-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP15:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK19-NEXT: [[TMP15:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CONV8:%.*]] = sext i8 [[TMP15]] to i32 -// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 // CHECK19-NEXT: [[ADD9:%.*]] = add nsw i32 [[CONV8]], [[MUL]] // CHECK19-NEXT: [[CONV10:%.*]] = trunc i32 [[ADD9]] to i8 @@ -2836,7 +2836,7 @@ // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP17]], 1 // CHECK19-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -2844,7 +2844,7 @@ // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4 +// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP19]]) // CHECK19-NEXT: br label [[OMP_PRECOND_END]] // CHECK19: omp.precond.end: @@ -2881,59 +2881,59 @@ // CHECK19-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2 +// CHECK19-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]]) // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK19: omp.dispatch.cond: -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr 
[[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK19: omp.dispatch.body: // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !15 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK19-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !15 +// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK19-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK19: omp.dispatch.inc: -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] // CHECK19-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK19-NEXT: 
[[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] // CHECK19-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]] diff --git a/clang/test/OpenMP/distribute_parallel_for_reduction_task_codegen.cpp b/clang/test/OpenMP/distribute_parallel_for_reduction_task_codegen.cpp --- a/clang/test/OpenMP/distribute_parallel_for_reduction_task_codegen.cpp +++ b/clang/test/OpenMP/distribute_parallel_for_reduction_task_codegen.cpp @@ -44,10 +44,10 @@ // CHECK1-NEXT: [[ARGC_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[ARGC_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARGC_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARGC_CASTED]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l14(i64 [[TMP1]], ptr [[TMP2]]) #[[ATTR6:[0-9]+]] // CHECK1-NEXT: ret i32 0 // @@ -60,10 +60,10 @@ // CHECK1-NEXT: [[ARGC_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[ARGC]], ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[ARGC_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARGC_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARGC_CASTED]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2:[0-9]+]], i32 2, ptr @.omp_outlined., i64 [[TMP1]], ptr [[TMP2]]) // CHECK1-NEXT: ret void // @@ -91,36 +91,36 @@ // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP2]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i64 [[TMP4]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: // CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..1, i64 [[TMP7]], i64 [[TMP8]], ptr [[ARGC_ADDR]], ptr [[TMP9]]) // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP10]], [[TMP11]] // CHECK1-NEXT: store i64 [[ADD]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -167,21 +167,21 @@ // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK1-NEXT: store i64 9, ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i64 [[TMP1]], ptr [[DOTOMP_LB]], align 8 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[DOTOMP_UB]], align 8 // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store i32 0, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP3]], i64 0 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 0 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64 // CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP6]] -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds ptr, ptr [[TMP7]], i64 9 // CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYIDX3]], align 8 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i64 [[LB_ADD_LEN]] @@ -217,198 +217,198 @@ // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 0 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 -// CHECK1-NEXT: store i64 4, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP29]], 
align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 -// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP32]], i8 0, i64 4, i1 false) +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 +// CHECK1-NEXT: store i64 4, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 +// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP30]], i8 0, i64 4, i1 false) // CHECK1-NEXT: [[DOTRD_INPUT_GEP_7:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds ptr, ptr [[TMP35]], i64 0 -// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[ARRAYIDX8]], align 8 -// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, ptr [[TMP36]], i64 0 -// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP38:%.*]] = sext i32 [[TMP37]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP38]] -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds ptr, ptr [[TMP39]], i64 9 -// CHECK1-NEXT: [[TMP40:%.*]] = load ptr, ptr [[ARRAYIDX11]], align 8 -// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, ptr [[TMP40]], i64 [[LB_ADD_LEN10]] -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARRAYIDX9]], ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP42:%.*]] = ptrtoint ptr [[ARRAYIDX12]] to i64 -// CHECK1-NEXT: [[TMP43:%.*]] = ptrtoint ptr [[ARRAYIDX9]] to i64 -// CHECK1-NEXT: [[TMP44:%.*]] = sub i64 [[TMP42]], [[TMP43]] -// CHECK1-NEXT: [[TMP45:%.*]] = sdiv exact i64 [[TMP44]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP46:%.*]] = add nuw i64 [[TMP45]], 1 -// CHECK1-NEXT: [[TMP47:%.*]] = mul nuw i64 [[TMP46]], ptrtoint (ptr getelementptr 
(i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 2 -// CHECK1-NEXT: store i64 [[TMP47]], ptr [[TMP48]], align 8 -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..2, ptr [[TMP49]], align 8 -// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP50]], align 8 -// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb..3, ptr [[TMP51]], align 8 -// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 6 -// CHECK1-NEXT: store i32 1, ptr [[TMP52]], align 8 +// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds ptr, ptr [[TMP32]], i64 0 +// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[ARRAYIDX8]], align 8 +// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, ptr [[TMP33]], i64 0 +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = sext i32 [[TMP34]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP35]] +// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds ptr, ptr [[TMP36]], i64 9 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[ARRAYIDX11]], align 8 +// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, ptr [[TMP37]], i64 [[LB_ADD_LEN10]] +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP31]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARRAYIDX9]], ptr [[TMP38]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[ARRAYIDX12]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[ARRAYIDX9]] to i64 +// CHECK1-NEXT: [[TMP41:%.*]] = sub i64 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: [[TMP42:%.*]] = sdiv exact i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = add nuw i64 [[TMP42]], 1 +// CHECK1-NEXT: [[TMP44:%.*]] = mul nuw i64 [[TMP43]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 2 +// CHECK1-NEXT: store i64 [[TMP44]], ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..2, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..3, ptr [[TMP48]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 6 +// CHECK1-NEXT: store i32 1, ptr [[TMP49]], align 8 +// CHECK1-NEXT: [[TMP50:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP51:%.*]] = load i32, ptr [[TMP50]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP52:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB2]], i32 [[TMP51]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) +// CHECK1-NEXT: store ptr [[TMP52]], ptr [[DOTTASK_RED_]], align 8 // CHECK1-NEXT: [[TMP53:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[TMP53]], align 4 -// CHECK1-NEXT: [[TMP56:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB2]], i32 [[TMP54]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) -// CHECK1-NEXT: store ptr [[TMP56]], ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: [[TMP57:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP58:%.*]] = load i32, ptr [[TMP57]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB3:[0-9]+]], i32 [[TMP58]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP59]], 9 +// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[TMP53]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB3:[0-9]+]], i32 [[TMP54]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) +// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP55]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP60]], [[COND_FALSE]] ] +// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP56]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP61:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 -// CHECK1-NEXT: store i64 [[TMP61]], ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i64 [[TMP57]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP62:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP63:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP62]], [[TMP63]] +// CHECK1-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP58]], [[TMP59]] // CHECK1-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK1: omp.inner.for.cond.cleanup: // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP64:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: 
[[MUL:%.*]] = mul nsw i64 [[TMP64]], 1 +// CHECK1-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP60]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL]] // CHECK1-NEXT: store i64 [[ADD]], ptr [[I]], align 8 -// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP65]], align 8 -// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP66]], align 8 -// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP68:%.*]] = load ptr, ptr [[_TMP5]], align 8 -// CHECK1-NEXT: store ptr [[TMP68]], ptr [[TMP67]], align 8 -// CHECK1-NEXT: [[TMP69:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP70:%.*]] = load i32, ptr [[TMP69]], align 4 -// CHECK1-NEXT: [[TMP71:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB2]], i32 [[TMP70]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) -// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP71]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP73]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP75:%.*]] = load ptr, ptr [[TMP74]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP75]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) -// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP71]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP77]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP79:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: store ptr [[TMP79]], ptr [[TMP78]], align 8 -// CHECK1-NEXT: [[TMP80:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP80]], align 4 -// CHECK1-NEXT: [[TMP82:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP81]], ptr [[TMP71]]) +// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP61]], align 8 +// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP62]], align 8 +// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP64:%.*]] = load ptr, ptr [[_TMP5]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP64]], ptr [[TMP63]], align 8 +// CHECK1-NEXT: [[TMP65:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP66:%.*]] = load i32, ptr [[TMP65]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP67:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB2]], i32 [[TMP66]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
+// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP67]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP68]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP70:%.*]] = load ptr, ptr [[TMP69]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP70]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) +// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP67]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP71]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP73:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP73]], ptr [[TMP72]], align 8 +// CHECK1-NEXT: [[TMP74:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP75:%.*]] = load i32, ptr [[TMP74]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP76:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP75]], ptr [[TMP67]]) // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP83:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP83]], 1 +// CHECK1-NEXT: [[TMP77:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP77]], 1 // CHECK1-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: -// CHECK1-NEXT: [[TMP84:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[TMP84]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP85]]) +// CHECK1-NEXT: [[TMP78:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP79:%.*]] = load i32, ptr [[TMP78]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP79]]) +// CHECK1-NEXT: [[TMP80:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP80]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB2]], i32 [[TMP81]], i32 1) +// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP82]], align 8 +// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP83]], align 8 +// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 +// CHECK1-NEXT: [[TMP85:%.*]] = inttoptr i64 [[TMP13]] to ptr +// CHECK1-NEXT: store ptr [[TMP85]], ptr [[TMP84]], align 8 // CHECK1-NEXT: [[TMP86:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[TMP86]], align 4 -// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB2]], i32 [[TMP87]], i32 1) -// CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP88]], align 8 -// CHECK1-NEXT: 
[[TMP90:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP90]], align 8 -// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP92:%.*]] = inttoptr i64 [[TMP13]] to ptr -// CHECK1-NEXT: store ptr [[TMP92]], ptr [[TMP91]], align 8 -// CHECK1-NEXT: [[TMP93:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP94:%.*]] = load i32, ptr [[TMP93]], align 4 -// CHECK1-NEXT: [[TMP96:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB4:[0-9]+]], i32 [[TMP94]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP96]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[TMP86]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP88:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB4:[0-9]+]], i32 [[TMP87]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP88]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP97:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP98:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP97]], [[TMP98]] +// CHECK1-NEXT: [[TMP89:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP90:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP89]], [[TMP90]] // CHECK1-NEXT: store i32 [[ADD15]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP13]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP99]] +// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP13]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP91]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE22:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST16:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT20:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP100:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP100]] to i32 -// CHECK1-NEXT: [[TMP101:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP101]] to i32 +// CHECK1-NEXT: [[TMP92:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP92]] to i32 +// CHECK1-NEXT: [[TMP93:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP93]] to i32 // CHECK1-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV]], [[CONV17]] // CHECK1-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8 // CHECK1-NEXT: store i8 [[CONV19]], ptr 
[[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT20]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP99]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP91]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_DONE22]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done22: -// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB4]], i32 [[TMP94]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB4]], i32 [[TMP87]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP102:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP103:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP102]] monotonic, align 4 -// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP13]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP104]] +// CHECK1-NEXT: [[TMP94:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP95:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP94]] monotonic, align 4 +// CHECK1-NEXT: [[TMP96:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP13]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP96]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY23]], label [[OMP_ARRAYCPY_DONE36:%.*]], label [[OMP_ARRAYCPY_BODY24:%.*]] // CHECK1: omp.arraycpy.body24: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST25:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT34:%.*]], [[ATOMIC_EXIT:%.*]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST26:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT33:%.*]], [[ATOMIC_EXIT]] ] -// CHECK1-NEXT: [[TMP105:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1 -// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP105]] to i32 +// CHECK1-NEXT: [[TMP97:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP97]] to i32 // CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]] monotonic, align 1 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] // CHECK1: atomic_cont: -// CHECK1-NEXT: [[TMP106:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP111:%.*]], [[ATOMIC_CONT]] ] -// CHECK1-NEXT: store i8 [[TMP106]], ptr [[_TMP28]], align 1 -// CHECK1-NEXT: [[TMP107:%.*]] = load i8, ptr [[_TMP28]], align 1 -// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP107]] to i32 -// CHECK1-NEXT: [[TMP108:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1 -// CHECK1-NEXT: [[CONV30:%.*]] = sext i8 [[TMP108]] to i32 +// CHECK1-NEXT: [[TMP98:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP103:%.*]], [[ATOMIC_CONT]] ] +// CHECK1-NEXT: store i8 [[TMP98]], ptr [[_TMP28]], align 1 +// CHECK1-NEXT: [[TMP99:%.*]] = load i8, ptr [[_TMP28]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP99]] to i32 +// CHECK1-NEXT: [[TMP100:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: 
[[CONV30:%.*]] = sext i8 [[TMP100]] to i32 // CHECK1-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV29]], [[CONV30]] // CHECK1-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8 // CHECK1-NEXT: store i8 [[CONV32]], ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP109:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP110:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP106]], i8 [[TMP109]] monotonic monotonic, align 1 -// CHECK1-NEXT: [[TMP111]] = extractvalue { i8, i1 } [[TMP110]], 0 -// CHECK1-NEXT: [[TMP112:%.*]] = extractvalue { i8, i1 } [[TMP110]], 1 -// CHECK1-NEXT: br i1 [[TMP112]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] +// CHECK1-NEXT: [[TMP101:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 +// CHECK1-NEXT: [[TMP102:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP98]], i8 [[TMP101]] monotonic monotonic, align 1 +// CHECK1-NEXT: [[TMP103]] = extractvalue { i8, i1 } [[TMP102]], 0 +// CHECK1-NEXT: [[TMP104:%.*]] = extractvalue { i8, i1 } [[TMP102]], 1 +// CHECK1-NEXT: br i1 [[TMP104]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] // CHECK1: atomic_exit: // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT33]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT34]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP104]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP96]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_DONE36]], label [[OMP_ARRAYCPY_BODY24]] // CHECK1: omp.arraycpy.done36: // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: -// CHECK1-NEXT: [[TMP113:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP113]]) +// CHECK1-NEXT: [[TMP105:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP105]]) // CHECK1-NEXT: ret void // // @@ -419,8 +419,8 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -431,12 +431,12 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP5]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], 
align 4 // CHECK1-NEXT: ret void // // @@ -449,7 +449,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP4]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP2]], [[TMP5]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -471,7 +471,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]] @@ -480,9 +480,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP8]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8 @@ -525,65 +525,65 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void 
@llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: call void [[TMP13]](ptr [[TMP14]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: [[TMP22:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP19]], ptr [[TMP18]]) -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]] -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP31]], i64 9 -// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 -// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i64 [[LB_ADD_LEN_I]] -// CHECK1-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 -// CHECK1-NEXT: [[TMP34:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP35:%.*]] = sub i64 [[TMP33]], [[TMP34]] -// CHECK1-NEXT: [[TMP36:%.*]] = sdiv exact i64 [[TMP35]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP37:%.*]] = add nuw i64 [[TMP36]], 1 -// CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: store i64 [[TMP37]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !12 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP39]], ptr [[TMP25]]) -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 8 -// 
CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[TMP43]] to i64 -// CHECK1-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP46:%.*]] = sub i64 [[TMP44]], [[TMP45]] -// CHECK1-NEXT: [[TMP47:%.*]] = sdiv exact i64 [[TMP46]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr i8, ptr [[TMP40]], i64 [[TMP47]] -// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP48]], ptr [[TMP4_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP15]], ptr [[TMP14]]) +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP24:%.*]] = sext i32 [[TMP23]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP24]] +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP26]], i64 9 +// CHECK1-NEXT: 
[[TMP27:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 +// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP27]], i64 [[LB_ADD_LEN_I]] +// CHECK1-NEXT: [[TMP28:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 +// CHECK1-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP30:%.*]] = sub i64 [[TMP28]], [[TMP29]] +// CHECK1-NEXT: [[TMP31:%.*]] = sdiv exact i64 [[TMP30]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP32:%.*]] = add nuw i64 [[TMP31]], 1 +// CHECK1-NEXT: [[TMP33:%.*]] = mul nuw i64 [[TMP32]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: store i64 [[TMP32]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !13 +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP34]], ptr [[TMP20]]) +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP37]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[TMP38]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP41:%.*]] = sub i64 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: [[TMP42:%.*]] = sdiv exact i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP35]], i64 [[TMP42]] +// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP43]], ptr [[TMP4_I]], align 8, !noalias !13 // CHECK1-NEXT: ret i32 0 // // @@ -595,38 +595,38 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 // CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// 
CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP15]], i64 [[TMP18]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP15]], [[TMP21]] +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[TMP14]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP11]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE5:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: -// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP15]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP23]] to i32 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP18]] to i32 +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP19]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i8 // CHECK1-NEXT: store i8 [[CONV4]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done5: // CHECK1-NEXT: ret void diff --git a/clang/test/OpenMP/distribute_parallel_for_simd_private_codegen.cpp b/clang/test/OpenMP/distribute_parallel_for_simd_private_codegen.cpp --- a/clang/test/OpenMP/distribute_parallel_for_simd_private_codegen.cpp +++ b/clang/test/OpenMP/distribute_parallel_for_simd_private_codegen.cpp @@ -182,45 +182,45 @@ // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store ptr 
[[G1]], ptr [[_TMP2]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF4:![0-9]+]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !4 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -258,9 +258,9 @@ // CHECK1-NEXT: store ptr undef, ptr [[_TMP1]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV2]], ptr [[DOTOMP_UB]], align 4 @@ -268,60 +268,60 @@ // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store ptr [[G1]], ptr [[_TMP3]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: 
cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: store double 1.000000e+00, ptr [[G]], align 8, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP3]], align 8, !llvm.access.group !8 -// CHECK1-NEXT: store volatile double 1.000000e+00, ptr [[TMP10]], align 8, !llvm.access.group !8 -// CHECK1-NEXT: store i32 3, ptr [[SVAR]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: store float 4.000000e+00, ptr [[SFVAR]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: store double 1.000000e+00, ptr [[G]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP3]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: store volatile double 1.000000e+00, ptr [[TMP10]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: store i32 3, ptr [[SVAR]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: store float 4.000000e+00, ptr [[SFVAR]], align 4, !llvm.access.group [[ACC_GRP9]] // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[G]], ptr [[TMP11]], align 8, !llvm.access.group !8 +// CHECK1-NEXT: store ptr [[G]], ptr [[TMP11]], align 8, !llvm.access.group [[ACC_GRP9]] // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[_TMP3]], align 8, !llvm.access.group !8 -// CHECK1-NEXT: store ptr [[TMP13]], ptr [[TMP12]], align 8, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[_TMP3]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: store ptr [[TMP13]], ptr [[TMP12]], align 8, !llvm.access.group [[ACC_GRP9]] // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[SVAR]], ptr [[TMP14]], align 8, !llvm.access.group !8 +// CHECK1-NEXT: store ptr [[SVAR]], ptr [[TMP14]], align 8, !llvm.access.group [[ACC_GRP9]] // 
CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[SFVAR]], ptr [[TMP15]], align 8, !llvm.access.group !8 -// CHECK1-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !8 +// CHECK1-NEXT: store ptr [[SFVAR]], ptr [[TMP15]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group [[ACC_GRP9]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK1-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -385,43 +385,43 @@ // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: store ptr [[G1]], ptr [[_TMP2]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // 
CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..1, i32 [[TMP7]], i32 [[TMP8]]), !llvm.access.group !5 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..1, i32 [[TMP7]], i32 [[TMP8]]), !llvm.access.group [[ACC_GRP6]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP9]], [[TMP10]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -459,68 +459,68 @@ // CHECK3-NEXT: store ptr undef, ptr [[_TMP1]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4, !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], ptr 
[[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: store ptr [[G1]], ptr [[_TMP2]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: store double 1.000000e+00, ptr [[G]], align 8, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: store volatile double 1.000000e+00, ptr [[TMP10]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: store i32 3, ptr [[SVAR]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: store float 4.000000e+00, ptr [[SFVAR]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: store double 1.000000e+00, ptr [[G]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: store volatile double 1.000000e+00, ptr [[TMP10]], align 4, !llvm.access.group 
[[ACC_GRP10]] +// CHECK3-NEXT: store i32 3, ptr [[SVAR]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: store float 4.000000e+00, ptr [[SFVAR]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 0 -// CHECK3-NEXT: store ptr [[G]], ptr [[TMP11]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: store ptr [[G]], ptr [[TMP11]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: store ptr [[TMP13]], ptr [[TMP12]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: store ptr [[TMP13]], ptr [[TMP12]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[SVAR]], ptr [[TMP14]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: store ptr [[SVAR]], ptr [[TMP14]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[SFVAR]], ptr [[TMP15]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !9 +// CHECK3-NEXT: store ptr [[SFVAR]], ptr [[TMP15]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK3-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -588,27 +588,27 @@ // CHECK9-NEXT: store ptr [[TEST]], ptr [[VAR]], align 8 // CHECK9-NEXT: store ptr undef, ptr [[_TMP1]], align 8 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 1, ptr [[TMP1]], align 4 -// CHECK9-NEXT: 
[[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP2]], align 4 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 1, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 // CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 // CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 // CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 // CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 // CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 2, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l95.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 -// CHECK9-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 2, ptr [[TMP8]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l95.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 +// CHECK9-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l95() 
#[[ATTR4:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -616,18 +616,18 @@ // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v() // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN]], i64 2 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN]], i64 2 // CHECK9-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK9: arraydestroy.body: -// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP12]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK9-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 // CHECK9-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK9-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] // CHECK9-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]] // CHECK9: arraydestroy.done2: // CHECK9-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP13]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP12]] // // // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev @@ -648,7 +648,7 @@ // CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK9-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK9-NEXT: call void @_ZN1SIfEC2Ef(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]]) // CHECK9-NEXT: ret void // @@ -699,49 +699,49 @@ // CHECK9-NEXT: call void @_ZN1SIfEC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) // CHECK9-NEXT: store ptr [[VAR]], ptr [[_TMP2]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 
[ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !5 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK9: omp.inner.for.cond.cleanup: // CHECK9-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !5 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !5 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !5 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP6]] // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !5 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP14]]) -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK9-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -790,9 +790,9 @@ // CHECK9-NEXT: store ptr undef, ptr [[_TMP1]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 [[CONV2]], ptr [[DOTOMP_UB]], align 4 @@ -811,71 +811,71 @@ // CHECK9-NEXT: call void @_ZN1SIfEC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) // CHECK9-NEXT: store ptr [[VAR]], ptr [[_TMP3]], align 8 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // 
CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK9: omp.inner.for.cond.cleanup: // CHECK9-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC]], i64 0, i64 [[IDXPROM]] -// CHECK9-NEXT: store i32 [[TMP10]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP12:%.*]] = load ptr, ptr [[_TMP3]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: store i32 [[TMP10]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: [[TMP12:%.*]] = load ptr, ptr [[_TMP3]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP13]] to i64 // CHECK9-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i64 0, i64 [[IDXPROM5]] -// CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX6]], ptr align 4 [[TMP12]], i64 4, i1 false), !llvm.access.group !9 +// CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX6]], ptr align 4 [[TMP12]], i64 4, i1 
false), !llvm.access.group [[ACC_GRP10]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK9-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP14]], 1 +// CHECK9-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: -// CHECK9-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP18]]) -// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0 -// CHECK9-NEXT: br i1 [[TMP20]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] +// CHECK9-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP16]]) +// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 +// CHECK9-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: // CHECK9-NEXT: store i32 2, ptr [[I]], align 4 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK9: .omp.final.done: // CHECK9-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]] // CHECK9-NEXT: [[ARRAY_BEGIN8:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN8]], i64 2 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN8]], i64 2 // CHECK9-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK9: arraydestroy.body: -// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP21]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP19]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK9-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 // CHECK9-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK9-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN8]] @@ -915,45 +915,45 @@ // CHECK9-NEXT: store ptr [[TEST]], ptr [[VAR]], align 8 // CHECK9-NEXT: store ptr undef, ptr [[_TMP1]], align 8 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 1, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP2]], align 4 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 1, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 // CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 // CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 // CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 // CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 // CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 2, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 -// CHECK9-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 2, ptr [[TMP8]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 +// CHECK9-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label 
[[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49() #[[ATTR4]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: // CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN]], i64 2 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN]], i64 2 // CHECK9-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK9: arraydestroy.body: -// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP12]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK9-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 // CHECK9-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK9-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] // CHECK9-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]] // CHECK9: arraydestroy.done2: // CHECK9-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP13]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP12]] // // // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev @@ -976,7 +976,7 @@ // CHECK9-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK9-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store float [[TMP0]], ptr [[F]], align 4 // CHECK9-NEXT: ret void // @@ -1008,7 +1008,7 @@ // CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK9-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @_ZN1SIiEC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef signext [[TMP0]]) // CHECK9-NEXT: ret void // @@ -1058,49 +1058,49 @@ // CHECK9-NEXT: call void @_ZN1SIiEC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) // CHECK9-NEXT: store ptr [[VAR]], ptr [[_TMP2]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load 
i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK9: omp.inner.for.cond.cleanup: // CHECK9-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !14 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..3, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !14 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..3, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP15]] // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !14 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP14]]) -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK9-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -1148,9 +1148,9 @@ // CHECK9-NEXT: store ptr undef, ptr [[_TMP1]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 [[CONV2]], ptr [[DOTOMP_UB]], align 4 @@ -1169,71 +1169,71 @@ // CHECK9-NEXT: call void @_ZN1SIiEC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) // CHECK9-NEXT: store ptr [[VAR]], ptr [[_TMP3]], align 8 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // 
CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK9: omp.inner.for.cond.cleanup: // CHECK9-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC]], i64 0, i64 [[IDXPROM]] -// CHECK9-NEXT: store i32 [[TMP10]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: [[TMP12:%.*]] = load ptr, ptr [[_TMP3]], align 8, !llvm.access.group !17 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: store i32 [[TMP10]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP12:%.*]] = load ptr, ptr [[_TMP3]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP13]] to i64 // CHECK9-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i64 0, i64 [[IDXPROM5]] -// CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX6]], ptr align 4 [[TMP12]], i64 4, i1 false), !llvm.access.group !17 +// CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX6]], ptr align 4 
[[TMP12]], i64 4, i1 false), !llvm.access.group [[ACC_GRP18]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK9-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP14]], 1 +// CHECK9-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: -// CHECK9-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP18]]) -// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0 -// CHECK9-NEXT: br i1 [[TMP20]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] +// CHECK9-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP16]]) +// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 +// CHECK9-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: // CHECK9-NEXT: store i32 2, ptr [[I]], align 4 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK9: .omp.final.done: // CHECK9-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]] // CHECK9-NEXT: [[ARRAY_BEGIN8:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN8]], i64 2 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN8]], i64 2 // CHECK9-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK9: arraydestroy.body: -// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP21]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP19]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK9-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 // CHECK9-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK9-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN8]] @@ -1272,7 +1272,7 @@ // CHECK9-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK9-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP0:%.*]] = 
load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[F]], align 4 // CHECK9-NEXT: ret void // @@ -1318,27 +1318,27 @@ // CHECK11-NEXT: store ptr [[TEST]], ptr [[VAR]], align 4 // CHECK11-NEXT: store ptr undef, ptr [[_TMP1]], align 4 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 1, ptr [[TMP1]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 0, ptr [[TMP2]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 1, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 // CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 // CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 // CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 // CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 // CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP8]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 2, ptr [[TMP9]], align 8 -// CHECK11-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l95.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 -// CHECK11-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label 
[[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 2, ptr [[TMP8]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l95.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 +// CHECK11-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l95() #[[ATTR4:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1346,18 +1346,18 @@ // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v() // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN]], i32 2 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN]], i32 2 // CHECK11-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK11: arraydestroy.body: -// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP12]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK11-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i32 -1 // CHECK11-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK11-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] // CHECK11-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]] // CHECK11: arraydestroy.done2: // CHECK11-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP13]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP12]] // // // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev @@ -1378,7 +1378,7 @@ // CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK11-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF6:![0-9]+]] // CHECK11-NEXT: call void @_ZN1SIfEC2Ef(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]]) // CHECK11-NEXT: ret void // @@ -1429,47 +1429,47 @@ // CHECK11-NEXT: call void @_ZN1SIfEC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) // CHECK11-NEXT: store ptr [[VAR]], ptr [[_TMP2]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr 
[[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK11: omp.inner.for.cond.cleanup: // CHECK11-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..1, i32 [[TMP7]], i32 [[TMP8]]), !llvm.access.group !6 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..1, i32 [[TMP7]], i32 [[TMP8]]), !llvm.access.group [[ACC_GRP7]] // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP9]], [[TMP10]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP12]]) -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK11-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -1518,8 +1518,8 @@ // CHECK11-NEXT: store ptr undef, ptr [[_TMP1]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 @@ -1537,69 +1537,69 @@ // CHECK11-NEXT: call void @_ZN1SIfEC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) // CHECK11-NEXT: store ptr [[VAR]], ptr [[_TMP2]], align 4 // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br 
label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK11-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK11: omp.inner.for.cond.cleanup: // CHECK11-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC]], i32 0, i32 [[TMP11]] -// CHECK11-NEXT: store i32 [[TMP10]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP12:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: store i32 [[TMP10]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: [[TMP12:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 0, i32 [[TMP13]] -// CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX4]], ptr align 4 [[TMP12]], i32 4, i1 false), !llvm.access.group !10 +// CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX4]], ptr align 4 [[TMP12]], i32 4, i1 false), !llvm.access.group [[ACC_GRP11]] // CHECK11-NEXT: br label 
[[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK11-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], 1 +// CHECK11-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: -// CHECK11-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP18]]) -// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0 -// CHECK11-NEXT: br i1 [[TMP20]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] +// CHECK11-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP16]]) +// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 +// CHECK11-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: // CHECK11-NEXT: store i32 2, ptr [[I]], align 4 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK11: .omp.final.done: // CHECK11-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]] // CHECK11-NEXT: [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN6]], i32 2 +// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN6]], i32 2 // CHECK11-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK11: arraydestroy.body: -// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP21]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP19]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK11-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i32 -1 // CHECK11-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK11-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN6]] @@ -1639,45 +1639,45 @@ // CHECK11-NEXT: store ptr [[TEST]], ptr [[VAR]], align 4 // CHECK11-NEXT: store ptr undef, ptr [[_TMP1]], align 4 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 1, ptr [[TMP1]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 0, ptr [[TMP2]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 1, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 // CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 // CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 // CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 // CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 // CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP8]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 2, ptr [[TMP9]], align 8 -// CHECK11-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 -// CHECK11-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 2, ptr [[TMP8]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 +// CHECK11-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label 
[[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49() #[[ATTR4]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: // CHECK11-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN]], i32 2 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN]], i32 2 // CHECK11-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK11: arraydestroy.body: -// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP12]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK11-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i32 -1 // CHECK11-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK11-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] // CHECK11-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]] // CHECK11: arraydestroy.done2: // CHECK11-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP13]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP12]] // // // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev @@ -1700,7 +1700,7 @@ // CHECK11-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 // CHECK11-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store float [[TMP0]], ptr [[F]], align 4 // CHECK11-NEXT: ret void // @@ -1732,7 +1732,7 @@ // CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @_ZN1SIiEC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK11-NEXT: ret void // @@ -1782,47 +1782,47 @@ // CHECK11-NEXT: call void @_ZN1SIiEC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) // CHECK11-NEXT: store ptr [[VAR]], ptr [[_TMP2]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 
+// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK11: omp.inner.for.cond.cleanup: // CHECK11-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..3, i32 [[TMP7]], i32 [[TMP8]]), !llvm.access.group !15 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..3, i32 [[TMP7]], i32 [[TMP8]]), !llvm.access.group [[ACC_GRP16]] // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !15 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP9]], [[TMP10]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP12]]) -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK11-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -1870,8 +1870,8 @@ // CHECK11-NEXT: store ptr undef, ptr [[_TMP1]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 @@ -1889,69 +1889,69 @@ // CHECK11-NEXT: call void @_ZN1SIiEC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) // CHECK11-NEXT: store ptr [[VAR]], ptr [[_TMP2]], align 4 // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: 
br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK11-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK11: omp.inner.for.cond.cleanup: // CHECK11-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC]], i32 0, i32 [[TMP11]] -// CHECK11-NEXT: store i32 [[TMP10]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: [[TMP12:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: store i32 [[TMP10]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP12:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i32 0, i32 [[TMP13]] -// CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX4]], ptr align 4 [[TMP12]], i32 4, i1 false), !llvm.access.group !18 +// CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX4]], ptr align 4 [[TMP12]], i32 4, i1 false), !llvm.access.group [[ACC_GRP19]] // CHECK11-NEXT: br label 
[[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK11-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], 1 +// CHECK11-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: -// CHECK11-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP18]]) -// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0 -// CHECK11-NEXT: br i1 [[TMP20]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] +// CHECK11-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP16]]) +// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 +// CHECK11-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: // CHECK11-NEXT: store i32 2, ptr [[I]], align 4 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK11: .omp.final.done: // CHECK11-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]] // CHECK11-NEXT: [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN6]], i32 2 +// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN6]], i32 2 // CHECK11-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK11: arraydestroy.body: -// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP21]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP19]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK11-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i32 -1 // CHECK11-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK11-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN6]] @@ -1990,7 +1990,7 @@ // CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 // CHECK11-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// 
CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[F]], align 4 // CHECK11-NEXT: ret void // @@ -2047,8 +2047,8 @@ // CHECK13-NEXT: store ptr undef, ptr [[_TMP1]], align 8 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK13-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] +// CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR4]], i32 0, i32 0 // CHECK13-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN]], i64 2 // CHECK13-NEXT: br label [[ARRAYCTOR_LOOP:%.*]] @@ -2063,43 +2063,43 @@ // CHECK13-NEXT: store ptr [[VAR5]], ptr [[_TMP6]], align 8 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK13: omp.inner.for.cond.cleanup: // CHECK13-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC3]], i64 0, i64 [[IDXPROM]] -// CHECK13-NEXT: store i32 [[TMP5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[_TMP6]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP8]] to i64 +// CHECK13-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP6:%.*]] = 
load ptr, ptr [[_TMP6]], align 8, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR4]], i64 0, i64 [[IDXPROM7]] -// CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX8]], ptr align 4 [[TMP7]], i64 4, i1 false), !llvm.access.group !2 +// CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX8]], ptr align 4 [[TMP6]], i64 4, i1 false), !llvm.access.group [[ACC_GRP3]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP8]], 1 +// CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 2, ptr [[I]], align 4 // CHECK13-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4:[0-9]+]] // CHECK13-NEXT: [[ARRAY_BEGIN10:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR4]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN10]], i64 2 +// CHECK13-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN10]], i64 2 // CHECK13-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK13: arraydestroy.body: -// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP12]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK13-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 // CHECK13-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK13-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN10]] @@ -2108,18 +2108,18 @@ // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v() // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[ARRAY_BEGIN12:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN12]], i64 2 +// CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN12]], i64 2 // CHECK13-NEXT: br label [[ARRAYDESTROY_BODY13:%.*]] // CHECK13: arraydestroy.body13: -// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST14:%.*]] = phi ptr [ [[TMP13]], [[ARRAYDESTROY_DONE11]] ], [ [[ARRAYDESTROY_ELEMENT15:%.*]], [[ARRAYDESTROY_BODY13]] ] +// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST14:%.*]] = phi ptr [ [[TMP10]], 
[[ARRAYDESTROY_DONE11]] ], [ [[ARRAYDESTROY_ELEMENT15:%.*]], [[ARRAYDESTROY_BODY13]] ] // CHECK13-NEXT: [[ARRAYDESTROY_ELEMENT15]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST14]], i64 -1 // CHECK13-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT15]]) #[[ATTR4]] // CHECK13-NEXT: [[ARRAYDESTROY_DONE16:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT15]], [[ARRAY_BEGIN12]] // CHECK13-NEXT: br i1 [[ARRAYDESTROY_DONE16]], label [[ARRAYDESTROY_DONE17:%.*]], label [[ARRAYDESTROY_BODY13]] // CHECK13: arraydestroy.done17: // CHECK13-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK13-NEXT: ret i32 [[TMP14]] +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK13-NEXT: ret i32 [[TMP11]] // // // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev @@ -2140,7 +2140,7 @@ // CHECK13-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK13-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK13-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: call void @_ZN1SIfEC2Ef(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]]) // CHECK13-NEXT: ret void // @@ -2186,8 +2186,8 @@ // CHECK13-NEXT: store ptr undef, ptr [[_TMP1]], align 8 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK13-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR4]], i32 0, i32 0 // CHECK13-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN]], i64 2 // CHECK13-NEXT: br label [[ARRAYCTOR_LOOP:%.*]] @@ -2202,43 +2202,43 @@ // CHECK13-NEXT: store ptr [[VAR5]], ptr [[_TMP6]], align 8 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK13: omp.inner.for.cond.cleanup: // CHECK13-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // 
CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC3]], i64 0, i64 [[IDXPROM]] -// CHECK13-NEXT: store i32 [[TMP5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[_TMP6]], align 8, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP8]] to i64 +// CHECK13-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: [[TMP6:%.*]] = load ptr, ptr [[_TMP6]], align 8, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR4]], i64 0, i64 [[IDXPROM7]] -// CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX8]], ptr align 4 [[TMP7]], i64 4, i1 false), !llvm.access.group !6 +// CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX8]], ptr align 4 [[TMP6]], i64 4, i1 false), !llvm.access.group [[ACC_GRP7]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP8]], 1 +// CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 2, ptr [[I]], align 4 // CHECK13-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]] // CHECK13-NEXT: [[ARRAY_BEGIN10:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR4]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN10]], i64 2 +// CHECK13-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN10]], i64 2 // CHECK13-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK13: arraydestroy.body: -// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP12]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], 
[[ARRAYDESTROY_BODY]] ] +// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK13-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 // CHECK13-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK13-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN10]] @@ -2246,18 +2246,18 @@ // CHECK13: arraydestroy.done11: // CHECK13-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[ARRAY_BEGIN12:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN12]], i64 2 +// CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN12]], i64 2 // CHECK13-NEXT: br label [[ARRAYDESTROY_BODY13:%.*]] // CHECK13: arraydestroy.body13: -// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST14:%.*]] = phi ptr [ [[TMP13]], [[ARRAYDESTROY_DONE11]] ], [ [[ARRAYDESTROY_ELEMENT15:%.*]], [[ARRAYDESTROY_BODY13]] ] +// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST14:%.*]] = phi ptr [ [[TMP10]], [[ARRAYDESTROY_DONE11]] ], [ [[ARRAYDESTROY_ELEMENT15:%.*]], [[ARRAYDESTROY_BODY13]] ] // CHECK13-NEXT: [[ARRAYDESTROY_ELEMENT15]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST14]], i64 -1 // CHECK13-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT15]]) #[[ATTR4]] // CHECK13-NEXT: [[ARRAYDESTROY_DONE16:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT15]], [[ARRAY_BEGIN12]] // CHECK13-NEXT: br i1 [[ARRAYDESTROY_DONE16]], label [[ARRAYDESTROY_DONE17:%.*]], label [[ARRAYDESTROY_BODY13]] // CHECK13: arraydestroy.done17: // CHECK13-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK13-NEXT: ret i32 [[TMP14]] +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK13-NEXT: ret i32 [[TMP11]] // // // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev @@ -2289,7 +2289,7 @@ // CHECK13-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK13-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store float [[TMP0]], ptr [[F]], align 4 // CHECK13-NEXT: ret void // @@ -2312,7 +2312,7 @@ // CHECK13-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK13-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: call void @_ZN1SIiEC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef signext [[TMP0]]) // CHECK13-NEXT: ret void // @@ -2347,7 +2347,7 @@ // CHECK13-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK13-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP0:%.*]] = 
load i32, ptr [[A_ADDR]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP0]], ptr [[F]], align 4 // CHECK13-NEXT: ret void // @@ -2397,8 +2397,8 @@ // CHECK15-NEXT: store ptr undef, ptr [[_TMP1]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK15-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] +// CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR4]], i32 0, i32 0 // CHECK15-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN]], i32 2 // CHECK15-NEXT: br label [[ARRAYCTOR_LOOP:%.*]] @@ -2413,41 +2413,41 @@ // CHECK15-NEXT: store ptr [[VAR5]], ptr [[_TMP6]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK15: omp.inner.for.cond.cleanup: // CHECK15-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC3]], i32 0, i32 [[TMP6]] -// CHECK15-NEXT: store i32 [[TMP5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP7:%.*]] = load ptr, ptr [[_TMP6]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR4]], i32 0, i32 [[TMP8]] -// CHECK15-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX7]], ptr align 4 [[TMP7]], i32 4, i1 false), !llvm.access.group !3 +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: 
[[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC3]], i32 0, i32 [[TMP5]] +// CHECK15-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: [[TMP6:%.*]] = load ptr, ptr [[_TMP6]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR4]], i32 0, i32 [[TMP7]] +// CHECK15-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX7]], ptr align 4 [[TMP6]], i32 4, i1 false), !llvm.access.group [[ACC_GRP4]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK15-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1 +// CHECK15-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 2, ptr [[I]], align 4 // CHECK15-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4:[0-9]+]] // CHECK15-NEXT: [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR4]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN9]], i32 2 +// CHECK15-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN9]], i32 2 // CHECK15-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK15: arraydestroy.body: -// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP12]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK15-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i32 -1 // CHECK15-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK15-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN9]] @@ -2456,18 +2456,18 @@ // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v() // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN11]], i32 2 +// CHECK15-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN11]], i32 2 // CHECK15-NEXT: br label [[ARRAYDESTROY_BODY12:%.*]] // CHECK15: arraydestroy.body12: -// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST13:%.*]] = phi ptr [ [[TMP13]], [[ARRAYDESTROY_DONE10]] ], [ [[ARRAYDESTROY_ELEMENT14:%.*]], [[ARRAYDESTROY_BODY12]] ] +// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST13:%.*]] = phi ptr [ [[TMP10]], 
[[ARRAYDESTROY_DONE10]] ], [ [[ARRAYDESTROY_ELEMENT14:%.*]], [[ARRAYDESTROY_BODY12]] ] // CHECK15-NEXT: [[ARRAYDESTROY_ELEMENT14]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST13]], i32 -1 // CHECK15-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT14]]) #[[ATTR4]] // CHECK15-NEXT: [[ARRAYDESTROY_DONE15:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT14]], [[ARRAY_BEGIN11]] // CHECK15-NEXT: br i1 [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_DONE16:%.*]], label [[ARRAYDESTROY_BODY12]] // CHECK15: arraydestroy.done16: // CHECK15-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK15-NEXT: ret i32 [[TMP14]] +// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK15-NEXT: ret i32 [[TMP11]] // // // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev @@ -2488,7 +2488,7 @@ // CHECK15-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK15-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK15-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: call void @_ZN1SIfEC2Ef(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]]) // CHECK15-NEXT: ret void // @@ -2534,8 +2534,8 @@ // CHECK15-NEXT: store ptr undef, ptr [[_TMP1]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK15-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR4]], i32 0, i32 0 // CHECK15-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN]], i32 2 // CHECK15-NEXT: br label [[ARRAYCTOR_LOOP:%.*]] @@ -2550,41 +2550,41 @@ // CHECK15-NEXT: store ptr [[VAR5]], ptr [[_TMP6]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK15: omp.inner.for.cond.cleanup: // CHECK15-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // 
CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC3]], i32 0, i32 [[TMP6]] -// CHECK15-NEXT: store i32 [[TMP5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP7:%.*]] = load ptr, ptr [[_TMP6]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR4]], i32 0, i32 [[TMP8]] -// CHECK15-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX7]], ptr align 4 [[TMP7]], i32 4, i1 false), !llvm.access.group !7 +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC3]], i32 0, i32 [[TMP5]] +// CHECK15-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: [[TMP6:%.*]] = load ptr, ptr [[_TMP6]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR4]], i32 0, i32 [[TMP7]] +// CHECK15-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX7]], ptr align 4 [[TMP6]], i32 4, i1 false), !llvm.access.group [[ACC_GRP8]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK15-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1 +// CHECK15-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 2, ptr [[I]], align 4 // CHECK15-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]] // CHECK15-NEXT: [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR4]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN9]], i32 2 +// CHECK15-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN9]], i32 2 // CHECK15-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK15: arraydestroy.body: -// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP12]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK15-NEXT: 
[[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK15-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i32 -1 // CHECK15-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK15-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN9]] @@ -2592,18 +2592,18 @@ // CHECK15: arraydestroy.done10: // CHECK15-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN11]], i32 2 +// CHECK15-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN11]], i32 2 // CHECK15-NEXT: br label [[ARRAYDESTROY_BODY12:%.*]] // CHECK15: arraydestroy.body12: -// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST13:%.*]] = phi ptr [ [[TMP13]], [[ARRAYDESTROY_DONE10]] ], [ [[ARRAYDESTROY_ELEMENT14:%.*]], [[ARRAYDESTROY_BODY12]] ] +// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST13:%.*]] = phi ptr [ [[TMP10]], [[ARRAYDESTROY_DONE10]] ], [ [[ARRAYDESTROY_ELEMENT14:%.*]], [[ARRAYDESTROY_BODY12]] ] // CHECK15-NEXT: [[ARRAYDESTROY_ELEMENT14]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST13]], i32 -1 // CHECK15-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT14]]) #[[ATTR4]] // CHECK15-NEXT: [[ARRAYDESTROY_DONE15:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT14]], [[ARRAY_BEGIN11]] // CHECK15-NEXT: br i1 [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_DONE16:%.*]], label [[ARRAYDESTROY_BODY12]] // CHECK15: arraydestroy.done16: // CHECK15-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK15-NEXT: ret i32 [[TMP14]] +// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK15-NEXT: ret i32 [[TMP11]] // // // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev @@ -2635,7 +2635,7 @@ // CHECK15-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK15-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 // CHECK15-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store float [[TMP0]], ptr [[F]], align 4 // CHECK15-NEXT: ret void // @@ -2658,7 +2658,7 @@ // CHECK15-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK15-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK15-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: call void @_ZN1SIiEC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK15-NEXT: ret void // @@ -2693,7 +2693,7 @@ // CHECK15-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK15-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 // CHECK15-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK15-NEXT: 
[[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[F]], align 4 // CHECK15-NEXT: ret void // diff --git a/clang/test/OpenMP/distribute_parallel_for_simd_proc_bind_codegen.cpp b/clang/test/OpenMP/distribute_parallel_for_simd_proc_bind_codegen.cpp --- a/clang/test/OpenMP/distribute_parallel_for_simd_proc_bind_codegen.cpp +++ b/clang/test/OpenMP/distribute_parallel_for_simd_proc_bind_codegen.cpp @@ -142,46 +142,46 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6:![0-9]+]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 4), !llvm.access.group !6 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !6 +// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 4), !llvm.access.group [[ACC_GRP7]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// 
CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !6 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP7]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !6 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -211,54 +211,54 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: 
cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -294,46 +294,46 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 3), !llvm.access.group !15 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !15 +// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 3), !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..3, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !15 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..3, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP16]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !15 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -363,54 +363,54 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label 
[[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !18 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !18 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -479,46 +479,46 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = 
load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !21 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 2), !llvm.access.group !21 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !21 +// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 2), !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP22]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !21 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP22]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..5, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !21 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..5, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP22]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !21 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -548,54 +548,54 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label 
[[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !24 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !24 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -629,52 +629,52 @@ // CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// 
CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: store i32 1000, ptr [[I]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK3-NEXT: store i32 999, ptr [[DOTOMP_UB4]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK3: omp.inner.for.cond7: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK3: omp.inner.for.body9: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK3-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] // CHECK3: omp.body.continue12: // 
CHECK3-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK3: omp.inner.for.inc13: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK3: omp.inner.for.end15: // CHECK3-NEXT: store i32 1000, ptr [[I6]], align 4 // CHECK3-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v() @@ -691,27 +691,27 @@ // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: store i32 1000, ptr [[I]], align 4 // CHECK3-NEXT: ret i32 0 diff --git a/clang/test/OpenMP/distribute_simd_codegen.cpp b/clang/test/OpenMP/distribute_simd_codegen.cpp --- a/clang/test/OpenMP/distribute_simd_codegen.cpp +++ 
b/clang/test/OpenMP/distribute_simd_codegen.cpp @@ -159,58 +159,58 @@ // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF8:![0-9]+]] +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 
8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store ptr null, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 4571424, ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP36:%.*]] = icmp 
ne i32 [[TMP35]], 0 -// CHECK1-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 4571424, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK1-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3:[0-9]+]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -259,74 +259,74 @@ // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8, !noundef [[NOUNDEF8]] // CHECK1-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i64 16) ] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: 
[[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP17]] to i64 // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[IDXPROM2]] 
-// CHECK1-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL4:%.*]] = fmul float [[TMP15]], [[TMP18]] -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP20]] to i64 // CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i64 [[IDXPROM5]] -// CHECK1-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP21]] -// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP23]] to i64 // CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 [[IDXPROM8]] -// CHECK1-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP9]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK1-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]]) -// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK1-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -351,58 +351,58 @@ // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// 
CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store ptr null, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds 
[4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.2, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 4571424, ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK1-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], 
ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.2, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 4571424, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK1-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -456,51 +456,51 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i32 [[TMP8]], ptr 
[[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK1-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64 // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM2]] -// CHECK1-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64 // CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM5]] -// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]] -// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64 // CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM8]] // CHECK1-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4 @@ -508,15 +508,15 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label 
[[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 // CHECK1-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -541,58 +541,58 @@ // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr 
inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store ptr null, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.5, ptr [[TMP30]], align 8 -// CHECK1-NEXT: 
[[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 16908289, ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK1-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.5, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 16908289, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK1-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -646,84 +646,84 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP4:%.*]] = load 
ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5) // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK1: omp.dispatch.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127 // CHECK1-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !17 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group !17 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 
4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !17 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group !17 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM3]] -// CHECK1-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]] -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group !17 -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64 // CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM6]] -// CHECK1-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]] -// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group !17 -// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64 // CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM9]] -// CHECK1-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1 -// CHECK1-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] +// CHECK1-NEXT: 
store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK1: omp.dispatch.inc: -// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]] // CHECK1-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]] // CHECK1-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK1: omp.dispatch.end: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0 // CHECK1-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -745,63 +745,63 @@ // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8 // CHECK1-NEXT: [[TMP:%.*]] = alloca i8, align 1 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store i8 0, ptr [[A]], align 1 -// CHECK1-NEXT: [[TMP0:%.*]] = load i8, ptr [[I]], align 1 +// CHECK1-NEXT: [[TMP0:%.*]] = load i8, ptr [[I]], align 1, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i8 [[TMP0]], ptr [[I_CASTED]], align 1 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[I_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[I_CASTED]], align 8, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i8 [[TMP2]], ptr [[A_CASTED]], align 1 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr 
[[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP16:%.*]] = load i8, ptr [[A]], align 1 -// CHECK1-NEXT: store i8 [[TMP16]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[CONV3:%.*]] = sext i8 [[TMP17]] to i32 -// CHECK1-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV3]] -// CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[SUB]], 1 -// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB4]], 1 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP5]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: store i64 [[TMP3]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store ptr null, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP12:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: store i8 [[TMP12]], ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32 +// CHECK1-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] +// CHECK1-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1 // CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 -// CHECK1-NEXT: [[SUB5:%.*]] = sub nsw i32 [[DIV]], 1 -// CHECK1-NEXT: store i32 [[SUB5]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK1-NEXT: [[TMP19:%.*]] = zext i32 [[ADD6]] to i64 +// CHECK1-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK1-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1 +// CHECK1-NEXT: [[TMP15:%.*]] = zext i32 [[ADD4]] to i64 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP20]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 2, ptr [[TMP21]], align 4 -// 
CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP14]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP15]], ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.8, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.9, ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 -// CHECK1-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP16]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 2, ptr [[TMP17]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP10]], ptr [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP11]], ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.8, ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.9, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 [[TMP15]], ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: 
[[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 +// CHECK1-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115(i64 [[TMP1]], i64 [[TMP3]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -843,9 +843,9 @@ // CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1 +// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32 // CHECK1-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK1-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -853,71 +853,71 @@ // CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK1-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1 -// CHECK1-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1: omp.precond.then: // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK1-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP12:%.*]] = 
load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !20 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !20 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK1-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP16:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !20 +// CHECK1-NEXT: [[TMP16:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CONV9:%.*]] = sext i8 [[TMP16]] to i32 -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !20 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1 // CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]] // CHECK1-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8 -// CHECK1-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group !20 +// CHECK1-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group [[ACC_GRP21]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !20 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK1-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !20 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP20]]) -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF8]] // 
CHECK1-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 // CHECK1-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP23]] to i32 -// CHECK1-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CONV14:%.*]] = sext i8 [[TMP24]] to i32 // CHECK1-NEXT: [[SUB15:%.*]] = sub i32 10, [[CONV14]] // CHECK1-NEXT: [[SUB16:%.*]] = sub i32 [[SUB15]], 1 @@ -951,39 +951,39 @@ // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.11, ptr 
[[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 100, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK1-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.11, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 100, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135(i64 [[TMP1]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1021,65 +1021,65 @@ // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: 
[[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]]) // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK1: omp.dispatch.cond: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !23 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !23 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]], !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !23 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !23 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: 
omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !23 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !23 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK1: omp.dispatch.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] // CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] // CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK1: omp.dispatch.end: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF8]] // CHECK1-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK1-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1111,58 +1111,58 @@ // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF9:![0-9]+]] +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds 
[4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP23]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr null, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP30]], align 4 -// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP31]], align 4 -// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP32]], align 4 -// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP33]], align 4 -// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 4571424, ptr [[TMP34]], align 8 -// CHECK3-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK3-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP22]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 4 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, 
ptr [[TMP24]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 4571424, ptr [[TMP26]], align 8 +// CHECK3-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK3-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3:[0-9]+]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1211,70 +1211,70 @@ // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i32 16) ] // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF9]] +// 
CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]] -// CHECK3-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 [[TMP17]] -// CHECK3-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL3:%.*]] = fmul float [[TMP15]], [[TMP18]] -// CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i32 [[TMP20]] -// CHECK3-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP21]] -// CHECK3-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // 
CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i32 [[TMP23]] -// CHECK3-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK3-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]]) -// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK3-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -1299,58 +1299,58 @@ // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP2]], ptr 
[[TMP14]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP23]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr null, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK3-NEXT: 
[[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes.2, ptr [[TMP30]], align 4 -// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP31]], align 4 -// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP32]], align 4 -// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP33]], align 4 -// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 4571424, ptr [[TMP34]], align 8 -// CHECK3-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK3-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.2, ptr [[TMP22]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP23]], align 4 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP24]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 4571424, ptr [[TMP26]], align 8 +// CHECK3-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.region_id, ptr [[KERNEL_ARGS]]) +// 
CHECK3-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK3-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1404,63 +1404,63 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK3-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 [[TMP13]] -// CHECK3-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, 
!noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP16]] -// CHECK3-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL3:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[TMP19]] -// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP20]] -// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP22]] // CHECK3-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 // CHECK3-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -1485,58 +1485,58 @@ // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !noundef 
[[NOUNDEF9]] // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP18]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP23]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr null, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] 
= getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes.5, ptr [[TMP30]], align 4 -// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP31]], align 4 -// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP32]], align 4 -// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP33]], align 4 -// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 16908289, ptr [[TMP34]], align 8 -// CHECK3-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK3-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4 +// CHECK3-NEXT: 
[[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.5, ptr [[TMP22]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP23]], align 4 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP24]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 16908289, ptr [[TMP26]], align 8 +// CHECK3-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK3-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1590,80 +1590,80 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5) // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK3: omp.dispatch.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], 
[[TMP10]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127 // CHECK3-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP15]] -// CHECK3-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 [[TMP18]] -// CHECK3-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL4:%.*]] = fmul float [[TMP16]], [[TMP19]] -// CHECK3-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP21]] -// CHECK3-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: [[TMP22:%.*]] = load float, ptr 
[[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL6:%.*]] = fmul float [[MUL4]], [[TMP22]] -// CHECK3-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[TMP24]] -// CHECK3-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD8:%.*]] = add i32 [[TMP25]], 1 -// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK3: omp.dispatch.inc: -// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD9:%.*]] = add i32 [[TMP26]], [[TMP27]] // CHECK3-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD10:%.*]] = add i32 [[TMP28]], [[TMP29]] // CHECK3-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK3: omp.dispatch.end: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0 // CHECK3-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -1685,63 +1685,63 @@ // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 4 // CHECK3-NEXT: [[TMP:%.*]] = alloca i8, align 1 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i8 0, ptr [[A]], align 1 -// 
CHECK3-NEXT: [[TMP0:%.*]] = load i8, ptr [[I]], align 1 +// CHECK3-NEXT: [[TMP0:%.*]] = load i8, ptr [[I]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i8 [[TMP0]], ptr [[I_CASTED]], align 1 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[I_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[I_CASTED]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i8 [[TMP2]], ptr [[A_CASTED]], align 1 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP16:%.*]] = load i8, ptr [[A]], align 1 -// CHECK3-NEXT: store i8 [[TMP16]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK3-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK3-NEXT: [[CONV3:%.*]] = sext i8 [[TMP17]] to i32 -// CHECK3-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV3]] -// CHECK3-NEXT: [[SUB4:%.*]] = sub i32 [[SUB]], 1 -// CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB4]], 1 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 [[TMP3]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store ptr null, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP12:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: store i8 [[TMP12]], ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: 
[[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32 +// CHECK3-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] +// CHECK3-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 +// CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1 // CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 -// CHECK3-NEXT: [[SUB5:%.*]] = sub nsw i32 [[DIV]], 1 -// CHECK3-NEXT: store i32 [[SUB5]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK3-NEXT: [[TMP19:%.*]] = zext i32 [[ADD6]] to i64 +// CHECK3-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK3-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1 +// CHECK3-NEXT: [[TMP15:%.*]] = zext i32 [[ADD4]] to i64 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP20]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 2, ptr [[TMP21]], align 4 -// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP14]], ptr [[TMP22]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP15]], ptr [[TMP23]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes.8, ptr [[TMP24]], align 4 -// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes.9, ptr [[TMP25]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 [[TMP19]], ptr [[TMP28]], align 8 -// CHECK3-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 -// CHECK3-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 2, ptr [[TMP17]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP10]], ptr [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP11]], ptr [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.8, ptr [[TMP20]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.9, ptr [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP23]], align 4 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 [[TMP15]], ptr [[TMP24]], align 8 +// CHECK3-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 +// CHECK3-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115(i32 [[TMP1]], i32 [[TMP3]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1783,9 +1783,9 @@ // CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1 +// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK3-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32 // CHECK3-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK3-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -1793,71 +1793,71 @@ // CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK3-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK3-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1 -// CHECK3-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32 // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK3: omp.precond.then: // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// 
CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK3-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !21 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP16:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !21 +// CHECK3-NEXT: [[TMP16:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV9:%.*]] = sext i8 [[TMP16]] to i32 -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1 // CHECK3-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]] // CHECK3-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8 -// CHECK3-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group 
!21 +// CHECK3-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group [[ACC_GRP22]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK3-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP20]]) -// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 // CHECK3-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: -// CHECK3-NEXT: [[TMP23:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP23:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV13:%.*]] = sext i8 [[TMP23]] to i32 -// CHECK3-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV14:%.*]] = sext i8 [[TMP24]] to i32 // CHECK3-NEXT: [[SUB15:%.*]] = sub i32 10, [[CONV14]] // CHECK3-NEXT: [[SUB16:%.*]] = sub i32 [[SUB15]], 1 @@ -1891,39 +1891,39 @@ // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP2]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds 
[1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes.11, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 100, ptr [[TMP17]], align 8 -// CHECK3-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK3-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.11, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = 
getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 100, ptr [[TMP15]], align 8 +// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135(i32 [[TMP1]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1961,65 +1961,65 @@ // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]]) // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK3: omp.dispatch.cond: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // 
CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !24 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !24 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK3: omp.dispatch.inc: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] // CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] // CHECK3-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK3: omp.dispatch.end: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // 
CHECK3-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2051,58 +2051,58 @@ // CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF8:![0-9]+]] +// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK5-NEXT: store ptr null, ptr [[TMP8]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 8 -// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 8 -// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK5-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 8 -// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 8 -// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK5-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 8 -// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 8 -// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK5-NEXT: store ptr null, ptr [[TMP23]], align 8 -// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8 +// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8 +// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8 +// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK5-NEXT: store ptr null, ptr [[TMP9]], align 8 +// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8 +// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 8 +// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK5-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8 +// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8 +// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK5-NEXT: store ptr null, ptr [[TMP15]], align 8 +// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK5-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK5-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 8 -// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK5-NEXT: store ptr @.offload_sizes, ptr [[TMP30]], align 8 -// CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK5-NEXT: store ptr @.offload_maptypes, ptr [[TMP31]], align 8 -// CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP33]], align 8 -// CHECK5-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK5-NEXT: store i64 4571424, ptr [[TMP34]], align 8 -// CHECK5-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.region_id, ptr [[KERNEL_ARGS]]) -// CHECK5-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK5-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK5-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK5-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8 +// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8 +// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK5-NEXT: store ptr @.offload_sizes, ptr [[TMP22]], align 8 +// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK5-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 8 +// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP24]], align 8 +// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP25]], align 8 +// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK5-NEXT: store i64 4571424, ptr [[TMP26]], align 8 +// CHECK5-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.region_id, ptr [[KERNEL_ARGS]]) +// CHECK5-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK5-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK5: omp_offload.failed: // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3:[0-9]+]] // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2151,74 +2151,74 @@ // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8, !noundef [[NOUNDEF8]] // CHECK5-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i64 16) ] // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr 
@[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]], !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]] // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !8 -// CHECK5-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group !8 -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM]] -// CHECK5-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !8 -// CHECK5-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group !8 -// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // 
CHECK5-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP17]] to i64 // CHECK5-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[IDXPROM2]] -// CHECK5-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[MUL4:%.*]] = fmul float [[TMP15]], [[TMP18]] -// CHECK5-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group !8 -// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP20]] to i64 // CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i64 [[IDXPROM5]] -// CHECK5-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP21]] -// CHECK5-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group !8 -// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP23]] to i64 // CHECK5-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 [[IDXPROM8]] -// CHECK5-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP9]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK5-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]]) -// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK5-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -2243,58 +2243,58 @@ // CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK5-NEXT: store ptr [[D]], ptr 
[[D_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK5-NEXT: store ptr null, ptr [[TMP8]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 8 -// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 8 -// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK5-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 8 -// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 8 -// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK5-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 8 -// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 8 -// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK5-NEXT: store ptr null, ptr [[TMP23]], align 8 -// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8 +// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8 +// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8 +// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x 
ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK5-NEXT: store ptr null, ptr [[TMP9]], align 8 +// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8 +// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 8 +// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK5-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8 +// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8 +// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK5-NEXT: store ptr null, ptr [[TMP15]], align 8 +// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK5-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK5-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 8 -// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK5-NEXT: store ptr @.offload_sizes.2, ptr [[TMP30]], align 8 -// CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK5-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP31]], align 8 -// CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP33]], align 8 -// CHECK5-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK5-NEXT: store i64 4571424, ptr [[TMP34]], align 8 -// CHECK5-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.region_id, ptr [[KERNEL_ARGS]]) -// CHECK5-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK5-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], 
i32 0, i32 0 +// CHECK5-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK5-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8 +// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8 +// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK5-NEXT: store ptr @.offload_sizes.2, ptr [[TMP22]], align 8 +// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK5-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP23]], align 8 +// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP24]], align 8 +// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP25]], align 8 +// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK5-NEXT: store i64 4571424, ptr [[TMP26]], align 8 +// CHECK5-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.region_id, ptr [[KERNEL_ARGS]]) +// CHECK5-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK5-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK5: omp_offload.failed: // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]] // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2348,51 +2348,51 @@ // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr 
[[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK5-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8, !nontemporal !15 -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8, !nontemporal !16, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM]] -// CHECK5-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK5-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8 -// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK5-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64 // CHECK5-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM2]] -// CHECK5-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +// CHECK5-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK5-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8 -// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK5-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64 // CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM5]] -// CHECK5-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4 +// CHECK5-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]] -// CHECK5-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8, !nontemporal !15 -// CHECK5-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK5-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8, !nontemporal !16, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64 // 
CHECK5-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM8]] // CHECK5-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4 @@ -2400,15 +2400,15 @@ // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 // CHECK5-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -2433,58 +2433,58 @@ // CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK5-NEXT: store ptr null, ptr [[TMP8]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 8 -// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 8 -// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK5-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 8 -// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 8 -// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK5-NEXT: store ptr null, ptr 
[[TMP18]], align 8 -// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 8 -// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 8 -// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK5-NEXT: store ptr null, ptr [[TMP23]], align 8 -// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 8 +// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8 +// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK5-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8 +// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK5-NEXT: store ptr null, ptr [[TMP9]], align 8 +// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 8 +// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 8 +// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK5-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 8 +// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 8 +// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK5-NEXT: store ptr null, ptr [[TMP15]], align 8 +// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK5-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK5-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8 -// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr [[TMP25]], 
ptr [[TMP29]], align 8 -// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK5-NEXT: store ptr @.offload_sizes.5, ptr [[TMP30]], align 8 -// CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK5-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP31]], align 8 -// CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP33]], align 8 -// CHECK5-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK5-NEXT: store i64 16908289, ptr [[TMP34]], align 8 -// CHECK5-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.region_id, ptr [[KERNEL_ARGS]]) -// CHECK5-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK5-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK5-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK5-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8 +// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8 +// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK5-NEXT: store ptr @.offload_sizes.5, ptr [[TMP22]], align 8 +// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK5-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP23]], align 8 +// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP24]], align 8 +// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP25]], align 8 +// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK5-NEXT: store i64 16908289, ptr [[TMP26]], align 8 +// CHECK5-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.region_id, ptr [[KERNEL_ARGS]]) +// CHECK5-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK5-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK5: omp_offload.failed: // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) 
#[[ATTR3]] // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2538,84 +2538,84 @@ // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5) // CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK5: omp.dispatch.cond: -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]] // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK5: omp.dispatch.body: // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !18 +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]] // CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127 // CHECK5-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !18 -// CHECK5-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group !18 -// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK5-NEXT: store i32 
[[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK5-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM]] -// CHECK5-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !18 -// CHECK5-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group !18 -// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK5-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64 // CHECK5-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM3]] -// CHECK5-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group !18 +// CHECK5-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]] -// CHECK5-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group !18 -// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK5-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64 // CHECK5-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM6]] -// CHECK5-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !18 +// CHECK5-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]] -// CHECK5-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group !18 -// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK5-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64 // CHECK5-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM9]] -// CHECK5-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group !18 +// CHECK5-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef 
[[NOUNDEF8]] // CHECK5-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1 -// CHECK5-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK5: omp.dispatch.inc: -// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]] // CHECK5-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]] // CHECK5-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_UB]], align 4 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK5: omp.dispatch.end: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0 // CHECK5-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -2637,63 +2637,63 @@ // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 8 // CHECK5-NEXT: [[TMP:%.*]] = alloca i8, align 1 // CHECK5-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK5-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK5-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 // CHECK5-NEXT: store i8 0, ptr [[A]], align 1 -// CHECK5-NEXT: [[TMP0:%.*]] = load i8, ptr [[I]], align 1 +// CHECK5-NEXT: [[TMP0:%.*]] = load i8, ptr [[I]], align 1, !noundef [[NOUNDEF8]] // CHECK5-NEXT: store i8 [[TMP0]], ptr [[I_CASTED]], align 1 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[I_CASTED]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[I_CASTED]], align 8, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF8]] // CHECK5-NEXT: store i8 [[TMP2]], ptr [[A_CASTED]], align 1 -// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK5-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK5-NEXT: store i64 [[TMP1]], ptr [[TMP6]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK5-NEXT: store ptr null, ptr [[TMP8]], align 8 
-// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK5-NEXT: store i64 [[TMP3]], ptr [[TMP9]], align 8 -// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK5-NEXT: store i64 [[TMP3]], ptr [[TMP11]], align 8 -// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK5-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP16:%.*]] = load i8, ptr [[A]], align 1 -// CHECK5-NEXT: store i8 [[TMP16]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK5-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK5-NEXT: [[CONV3:%.*]] = sext i8 [[TMP17]] to i32 -// CHECK5-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV3]] -// CHECK5-NEXT: [[SUB4:%.*]] = sub i32 [[SUB]], 1 -// CHECK5-NEXT: [[ADD:%.*]] = add i32 [[SUB4]], 1 +// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: store i64 [[TMP1]], ptr [[TMP5]], align 8 +// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK5-NEXT: store i64 [[TMP3]], ptr [[TMP7]], align 8 +// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK5-NEXT: store i64 [[TMP3]], ptr [[TMP8]], align 8 +// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK5-NEXT: store ptr null, ptr [[TMP9]], align 8 +// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP12:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: store i8 [[TMP12]], ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK5-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32 +// CHECK5-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] +// CHECK5-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 +// CHECK5-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1 // CHECK5-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 -// CHECK5-NEXT: [[SUB5:%.*]] = sub nsw i32 [[DIV]], 1 -// CHECK5-NEXT: store i32 [[SUB5]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK5-NEXT: [[TMP19:%.*]] = zext i32 [[ADD6]] to i64 +// CHECK5-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK5-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1 +// CHECK5-NEXT: [[TMP15:%.*]] = zext i32 [[ADD4]] to i64 // CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 
0 -// CHECK5-NEXT: store i32 1, ptr [[TMP20]], align 4 -// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK5-NEXT: store i32 2, ptr [[TMP21]], align 4 -// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr [[TMP14]], ptr [[TMP22]], align 8 -// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr [[TMP15]], ptr [[TMP23]], align 8 -// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK5-NEXT: store ptr @.offload_sizes.8, ptr [[TMP24]], align 8 -// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK5-NEXT: store ptr @.offload_maptypes.9, ptr [[TMP25]], align 8 -// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP26]], align 8 -// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK5-NEXT: store i64 [[TMP19]], ptr [[TMP28]], align 8 -// CHECK5-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.region_id, ptr [[KERNEL_ARGS]]) -// CHECK5-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 -// CHECK5-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK5-NEXT: store i32 1, ptr [[TMP16]], align 4 +// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK5-NEXT: store i32 2, ptr [[TMP17]], align 4 +// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr [[TMP10]], ptr [[TMP18]], align 8 +// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr [[TMP11]], ptr [[TMP19]], align 8 +// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK5-NEXT: store ptr @.offload_sizes.8, ptr [[TMP20]], align 8 +// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK5-NEXT: store ptr @.offload_maptypes.9, ptr [[TMP21]], align 8 +// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP22]], align 8 +// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP23]], align 8 +// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK5-NEXT: store i64 [[TMP15]], ptr 
[[TMP24]], align 8 +// CHECK5-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.region_id, ptr [[KERNEL_ARGS]]) +// CHECK5-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 +// CHECK5-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK5: omp_offload.failed: // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115(i64 [[TMP1]], i64 [[TMP3]]) #[[ATTR3]] // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2735,9 +2735,9 @@ // CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1 +// CHECK5-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1, !noundef [[NOUNDEF8]] // CHECK5-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK5-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK5-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32 // CHECK5-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK5-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -2745,75 +2745,75 @@ // CHECK5-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK5-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK5-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK5-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK5-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1 -// CHECK5-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK5-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32 // CHECK5-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK5: omp.precond.then: // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK5-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: -// CHECK5-NEXT: [[TMP11:%.*]] = 
load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP1]], align 1 +// CHECK5-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP1]], align 1, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP14]], 0 // CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK5: omp_if.then: // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !21 +// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] // CHECK5-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !21 +// CHECK5-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CONV9:%.*]] = sext i8 [[TMP17]] to i32 -// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 +// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK5-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]] // CHECK5-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8 -// CHECK5-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal !15, !llvm.access.group !21 +// CHECK5-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal !16, !llvm.access.group [[ACC_GRP22]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 +// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK5-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] 
// CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_IF_END:%.*]] // CHECK5: omp_if.else: // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]] // CHECK5: omp.inner.for.cond13: -// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CMP14:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] // CHECK5-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END23:%.*]] // CHECK5: omp.inner.for.body15: -// CHECK5-NEXT: [[TMP22:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK5-NEXT: [[TMP22:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CONV16:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[MUL17:%.*]] = mul nsw i32 [[TMP23]], 1 // CHECK5-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV16]], [[MUL17]] // CHECK5-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8 @@ -2822,25 +2822,25 @@ // CHECK5: omp.body.continue20: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC21:%.*]] // CHECK5: omp.inner.for.inc21: -// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[ADD22:%.*]] = add nsw i32 [[TMP24]], 1 // CHECK5-NEXT: store i32 [[ADD22]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP24:![0-9]+]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP25:![0-9]+]] // CHECK5: omp.inner.for.end23: // CHECK5-NEXT: br label [[OMP_IF_END]] // CHECK5: omp_if.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK5-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: -// CHECK5-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK5-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CONV24:%.*]] = sext i8 [[TMP29]] to i32 -// CHECK5-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK5-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CONV25:%.*]] = sext i8 [[TMP30]] to i32 // CHECK5-NEXT: [[SUB26:%.*]] = sub i32 10, [[CONV25]] // CHECK5-NEXT: [[SUB27:%.*]] = sub i32 [[SUB26]], 1 @@ -2874,39 +2874,39 @@ // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK5-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK5-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2 +// 
CHECK5-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF8]] // CHECK5-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK5-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK5-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK5-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: store i64 [[TMP1]], ptr [[TMP3]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK5-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK5-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK5-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK5-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 8 -// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK5-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 8 -// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK5-NEXT: store ptr @.offload_sizes.11, ptr [[TMP13]], align 8 -// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK5-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP14]], align 8 -// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP15]], align 8 -// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP16]], align 8 -// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK5-NEXT: store i64 100, ptr [[TMP17]], align 8 -// CHECK5-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.region_id, ptr [[KERNEL_ARGS]]) -// CHECK5-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 
0 -// CHECK5-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK5-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK5-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK5-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 +// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK5-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 +// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK5-NEXT: store ptr @.offload_sizes.11, ptr [[TMP11]], align 8 +// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK5-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP12]], align 8 +// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK5-NEXT: store ptr null, ptr [[TMP13]], align 8 +// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK5-NEXT: store ptr null, ptr [[TMP14]], align 8 +// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK5-NEXT: store i64 100, ptr [[TMP15]], align 8 +// CHECK5-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.region_id, ptr [[KERNEL_ARGS]]) +// CHECK5-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK5-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK5: omp_offload.failed: // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135(i64 [[TMP1]]) #[[ATTR3]] // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2944,65 +2944,65 @@ // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2 +// CHECK5-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]]) // CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK5: omp.dispatch.cond: -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // 
CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK5: omp.dispatch.body: // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !26 -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !26 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]], !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !26 +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !26 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !26 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !26 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK5: omp.dispatch.inc: -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP14:%.*]] 
= load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] // CHECK5-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF8]] +// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] // CHECK5-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK5: omp.dispatch.end: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF8]] // CHECK5-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK5-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -3034,58 +3034,58 @@ // CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF9:![0-9]+]] +// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK7-NEXT: store ptr null, ptr [[TMP8]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 4 -// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 4 -// CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK7-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 4 -// CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 4 -// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK7-NEXT: store ptr null, ptr [[TMP18]], align 4 -// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 4 -// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 4 -// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK7-NEXT: store ptr null, ptr [[TMP23]], align 4 -// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK7-NEXT: store ptr null, ptr [[TMP6]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK7-NEXT: store ptr null, ptr [[TMP9]], align 4 +// CHECK7-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4 +// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4 +// CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK7-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4 +// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4 +// CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK7-NEXT: store ptr null, ptr [[TMP15]], align 4 +// CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK7-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK7-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK7-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4 -// CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK7-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 4 -// CHECK7-NEXT: [[TMP30:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK7-NEXT: store ptr @.offload_sizes, ptr [[TMP30]], align 4 -// CHECK7-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK7-NEXT: store ptr @.offload_maptypes, ptr [[TMP31]], align 4 -// CHECK7-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK7-NEXT: store ptr null, ptr [[TMP32]], align 4 -// CHECK7-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK7-NEXT: store ptr null, ptr [[TMP33]], align 4 -// CHECK7-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK7-NEXT: store i64 4571424, ptr [[TMP34]], align 8 -// CHECK7-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.region_id, ptr [[KERNEL_ARGS]]) -// CHECK7-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK7-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK7-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK7-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK7-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4 +// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK7-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4 +// CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK7-NEXT: store ptr @.offload_sizes, ptr [[TMP22]], align 4 +// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK7-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 4 +// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK7-NEXT: store ptr null, ptr [[TMP24]], align 4 +// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK7-NEXT: store ptr null, ptr [[TMP25]], align 4 +// CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK7-NEXT: store i64 4571424, ptr [[TMP26]], align 8 +// CHECK7-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70.region_id, ptr [[KERNEL_ARGS]]) +// CHECK7-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK7-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK7: omp_offload.failed: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z23without_schedule_clausePfS_S_S__l70(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3:[0-9]+]] // CHECK7-NEXT: br label 
[[OMP_OFFLOAD_CONT]] @@ -3134,70 +3134,70 @@ // CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i32 16) ] // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]] // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 -// CHECK7-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group !9 -// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK7-NEXT: [[TMP13:%.*]] 
= load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]] -// CHECK7-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !9 -// CHECK7-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group !9 -// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK7-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 [[TMP17]] -// CHECK7-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group !9 +// CHECK7-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[MUL3:%.*]] = fmul float [[TMP15]], [[TMP18]] -// CHECK7-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group !9 -// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK7-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i32 [[TMP20]] -// CHECK7-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group !9 +// CHECK7-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP21]] -// CHECK7-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group !9 -// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK7-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i32 [[TMP23]] -// CHECK7-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4, !llvm.access.group !9 +// CHECK7-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK7-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK7-NEXT: br label 
[[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]]) -// CHECK7-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK7-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -3222,58 +3222,58 @@ // CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK7-NEXT: store ptr null, ptr [[TMP8]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 4 -// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 4 -// CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK7-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 4 -// CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 4 -// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK7-NEXT: store ptr null, ptr [[TMP18]], align 4 -// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 4 -// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 4 -// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK7-NEXT: store ptr null, ptr [[TMP23]], align 4 -// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds 
[4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK7-NEXT: store ptr null, ptr [[TMP6]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK7-NEXT: store ptr null, ptr [[TMP9]], align 4 +// CHECK7-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4 +// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4 +// CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK7-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4 +// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4 +// CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK7-NEXT: store ptr null, ptr [[TMP15]], align 4 +// CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK7-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK7-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK7-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4 -// CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK7-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 4 -// CHECK7-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK7-NEXT: store ptr @.offload_sizes.2, ptr [[TMP30]], align 4 -// CHECK7-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK7-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP31]], align 4 -// CHECK7-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK7-NEXT: store ptr null, ptr [[TMP32]], align 4 -// CHECK7-NEXT: [[TMP33:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK7-NEXT: store ptr null, ptr [[TMP33]], align 4 -// CHECK7-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK7-NEXT: store i64 4571424, ptr [[TMP34]], align 8 -// CHECK7-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.region_id, ptr [[KERNEL_ARGS]]) -// CHECK7-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK7-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK7-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK7-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK7-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4 +// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK7-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4 +// CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK7-NEXT: store ptr @.offload_sizes.2, ptr [[TMP22]], align 4 +// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK7-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP23]], align 4 +// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK7-NEXT: store ptr null, ptr [[TMP24]], align 4 +// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK7-NEXT: store ptr null, ptr [[TMP25]], align 4 +// CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK7-NEXT: store i64 4571424, ptr [[TMP26]], align 8 +// CHECK7-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86.region_id, ptr [[KERNEL_ARGS]]) +// CHECK7-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK7-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK7: omp_offload.failed: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z18static_not_chunkedPfS_S_S__l86(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]] // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -3327,63 +3327,63 @@ // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK7-NEXT: 
[[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK7-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4, !nontemporal !16 -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK7-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4, !nontemporal !17, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 [[TMP13]] -// CHECK7-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK7-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4 -// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK7-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP16]] -// CHECK7-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK7-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[MUL3:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK7-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4 -// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK7-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[TMP19]] -// CHECK7-NEXT: [[TMP20:%.*]] 
= load float, ptr [[ARRAYIDX4]], align 4 +// CHECK7-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP20]] -// CHECK7-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4, !nontemporal !16 -// CHECK7-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK7-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4, !nontemporal !17, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP22]] // CHECK7-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK7-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 // CHECK7-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -3408,58 +3408,58 @@ // CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK7-NEXT: store ptr null, ptr [[TMP8]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP9]], align 4 -// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP11]], align 4 -// CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK7-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP14]], align 4 -// CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP16]], align 4 -// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK7-NEXT: store ptr null, ptr [[TMP18]], align 4 -// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP19]], align 4 -// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP21]], align 4 -// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK7-NEXT: store ptr null, ptr [[TMP23]], align 4 -// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK7-NEXT: store ptr [[TMP0]], ptr [[TMP5]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK7-NEXT: store ptr null, ptr [[TMP6]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK7-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK7-NEXT: store ptr null, ptr [[TMP9]], align 4 +// CHECK7-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP10]], align 4 +// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK7-NEXT: store ptr [[TMP2]], ptr [[TMP11]], align 4 +// CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK7-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP13]], align 4 +// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK7-NEXT: store ptr [[TMP3]], ptr [[TMP14]], align 4 +// CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK7-NEXT: store ptr null, ptr [[TMP15]], align 4 +// CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// 
CHECK7-NEXT: store i32 1, ptr [[TMP26]], align 4 -// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK7-NEXT: store i32 4, ptr [[TMP27]], align 4 -// CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK7-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4 -// CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK7-NEXT: store ptr [[TMP25]], ptr [[TMP29]], align 4 -// CHECK7-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK7-NEXT: store ptr @.offload_sizes.5, ptr [[TMP30]], align 4 -// CHECK7-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK7-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP31]], align 4 -// CHECK7-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK7-NEXT: store ptr null, ptr [[TMP32]], align 4 -// CHECK7-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK7-NEXT: store ptr null, ptr [[TMP33]], align 4 -// CHECK7-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK7-NEXT: store i64 16908289, ptr [[TMP34]], align 8 -// CHECK7-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.region_id, ptr [[KERNEL_ARGS]]) -// CHECK7-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 -// CHECK7-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK7-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK7-NEXT: store i32 4, ptr [[TMP19]], align 4 +// CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK7-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4 +// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK7-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4 +// CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK7-NEXT: store ptr @.offload_sizes.5, ptr [[TMP22]], align 4 +// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK7-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP23]], align 4 +// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK7-NEXT: store ptr null, ptr [[TMP24]], align 4 +// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK7-NEXT: store ptr null, ptr [[TMP25]], align 4 +// CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK7-NEXT: store i64 16908289, ptr 
[[TMP26]], align 8 +// CHECK7-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103.region_id, ptr [[KERNEL_ARGS]]) +// CHECK7-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK7-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK7: omp_offload.failed: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14static_chunkedPfS_S_S__l103(ptr [[TMP0]], ptr [[TMP1]], ptr [[TMP2]], ptr [[TMP3]]) #[[ATTR3]] // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -3513,80 +3513,80 @@ // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5) // CHECK7-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK7: omp.dispatch.cond: -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]] // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK7: omp.dispatch.body: // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !19 +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]] // CHECK7-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP13:%.*]] = load 
i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127 // CHECK7-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !19 -// CHECK7-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group !19 -// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !19 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK7-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP15]] -// CHECK7-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !19 -// CHECK7-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group !19 -// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !19 +// CHECK7-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 [[TMP18]] -// CHECK7-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group !19 +// CHECK7-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[MUL4:%.*]] = fmul float [[TMP16]], [[TMP19]] -// CHECK7-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group !19 -// CHECK7-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !19 +// CHECK7-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP21]] -// CHECK7-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group !19 +// CHECK7-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[MUL6:%.*]] = fmul float [[MUL4]], [[TMP22]] -// CHECK7-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group !19 -// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !19 +// CHECK7-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[TMP24]] -// CHECK7-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group !19 +// CHECK7-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: 
omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 +// CHECK7-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ADD8:%.*]] = add i32 [[TMP25]], 1 -// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK7: omp.dispatch.inc: -// CHECK7-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK7-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ADD9:%.*]] = add i32 [[TMP26]], [[TMP27]] // CHECK7-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_LB]], align 4 -// CHECK7-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK7-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ADD10:%.*]] = add i32 [[TMP28]], [[TMP29]] // CHECK7-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_UB]], align 4 // CHECK7-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK7: omp.dispatch.end: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK7-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0 // CHECK7-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -3608,63 +3608,63 @@ // CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x ptr], align 4 // CHECK7-NEXT: [[TMP:%.*]] = alloca i8, align 1 // CHECK7-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 -// CHECK7-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK7-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i8 0, ptr [[A]], align 1 -// CHECK7-NEXT: [[TMP0:%.*]] = load i8, ptr [[I]], align 1 +// CHECK7-NEXT: [[TMP0:%.*]] = load i8, ptr [[I]], align 1, !noundef [[NOUNDEF9]] // CHECK7-NEXT: store i8 [[TMP0]], ptr [[I_CASTED]], align 1 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[I_CASTED]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[I_CASTED]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF9]] // CHECK7-NEXT: store i8 [[TMP2]], ptr [[A_CASTED]], align 1 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4 -// CHECK7-NEXT: 
[[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK7-NEXT: store i32 [[TMP1]], ptr [[TMP6]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK7-NEXT: store ptr null, ptr [[TMP8]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK7-NEXT: store i32 [[TMP3]], ptr [[TMP9]], align 4 -// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK7-NEXT: store i32 [[TMP3]], ptr [[TMP11]], align 4 -// CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK7-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP16:%.*]] = load i8, ptr [[A]], align 1 -// CHECK7-NEXT: store i8 [[TMP16]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK7-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK7-NEXT: [[CONV3:%.*]] = sext i8 [[TMP17]] to i32 -// CHECK7-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV3]] -// CHECK7-NEXT: [[SUB4:%.*]] = sub i32 [[SUB]], 1 -// CHECK7-NEXT: [[ADD:%.*]] = add i32 [[SUB4]], 1 +// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK7-NEXT: store i32 [[TMP1]], ptr [[TMP5]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK7-NEXT: store ptr null, ptr [[TMP6]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK7-NEXT: store i32 [[TMP3]], ptr [[TMP7]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK7-NEXT: store i32 [[TMP3]], ptr [[TMP8]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK7-NEXT: store ptr null, ptr [[TMP9]], align 4 +// CHECK7-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP12:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: store i8 [[TMP12]], ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK7-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[CONV:%.*]] = sext i8 [[TMP13]] to i32 +// CHECK7-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] +// CHECK7-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 +// CHECK7-NEXT: [[ADD:%.*]] = add i32 [[SUB2]], 1 // CHECK7-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 -// CHECK7-NEXT: [[SUB5:%.*]] = sub nsw i32 [[DIV]], 1 -// CHECK7-NEXT: store i32 [[SUB5]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK7-NEXT: [[TMP19:%.*]] = zext i32 [[ADD6]] to i64 +// CHECK7-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK7-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: 
[[ADD4:%.*]] = add nsw i32 [[TMP14]], 1 +// CHECK7-NEXT: [[TMP15:%.*]] = zext i32 [[ADD4]] to i64 // CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK7-NEXT: store i32 1, ptr [[TMP20]], align 4 -// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK7-NEXT: store i32 2, ptr [[TMP21]], align 4 -// CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK7-NEXT: store ptr [[TMP14]], ptr [[TMP22]], align 4 -// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK7-NEXT: store ptr [[TMP15]], ptr [[TMP23]], align 4 -// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK7-NEXT: store ptr @.offload_sizes.8, ptr [[TMP24]], align 4 -// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK7-NEXT: store ptr @.offload_maptypes.9, ptr [[TMP25]], align 4 -// CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK7-NEXT: store ptr null, ptr [[TMP26]], align 4 -// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK7-NEXT: store ptr null, ptr [[TMP27]], align 4 -// CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK7-NEXT: store i64 [[TMP19]], ptr [[TMP28]], align 8 -// CHECK7-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.region_id, ptr [[KERNEL_ARGS]]) -// CHECK7-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 -// CHECK7-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK7-NEXT: store i32 1, ptr [[TMP16]], align 4 +// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK7-NEXT: store i32 2, ptr [[TMP17]], align 4 +// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK7-NEXT: store ptr [[TMP10]], ptr [[TMP18]], align 4 +// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK7-NEXT: store ptr [[TMP11]], ptr [[TMP19]], align 4 +// CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK7-NEXT: store ptr @.offload_sizes.8, ptr [[TMP20]], align 4 +// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK7-NEXT: store ptr @.offload_maptypes.9, ptr [[TMP21]], align 4 +// CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK7-NEXT: store ptr null, ptr [[TMP22]], align 4 +// CHECK7-NEXT: [[TMP23:%.*]] = 
getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK7-NEXT: store ptr null, ptr [[TMP23]], align 4 +// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK7-NEXT: store i64 [[TMP15]], ptr [[TMP24]], align 8 +// CHECK7-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115.region_id, ptr [[KERNEL_ARGS]]) +// CHECK7-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 +// CHECK7-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK7: omp_offload.failed: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z12test_precondv_l115(i32 [[TMP1]], i32 [[TMP3]]) #[[ATTR3]] // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -3706,9 +3706,9 @@ // CHECK7-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1 +// CHECK7-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1, !noundef [[NOUNDEF9]] // CHECK7-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK7-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK7-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32 // CHECK7-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK7-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -3716,75 +3716,75 @@ // CHECK7-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK7-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK7-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK7-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK7-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1 -// CHECK7-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK7-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32 // CHECK7-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10 // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK7: omp.precond.then: // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef 
[[NOUNDEF9]] +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK7-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP1]], align 1 +// CHECK7-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP1]], align 1, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP14]], 0 // CHECK7-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !22 -// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !22 +// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] // CHECK7-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !22 +// CHECK7-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CONV9:%.*]] = sext i8 [[TMP17]] to i32 -// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !22 +// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK7-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]] // CHECK7-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8 -// CHECK7-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal !16, !llvm.access.group !22 +// CHECK7-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal !17, !llvm.access.group [[ACC_GRP23]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !22 +// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK7-NEXT: store i32 
[[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !22 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] // CHECK7: omp_if.else: // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]] // CHECK7: omp.inner.for.cond13: -// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CMP14:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] // CHECK7-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END23:%.*]] // CHECK7: omp.inner.for.body15: -// CHECK7-NEXT: [[TMP22:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK7-NEXT: [[TMP22:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CONV16:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[MUL17:%.*]] = mul nsw i32 [[TMP23]], 1 // CHECK7-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV16]], [[MUL17]] // CHECK7-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8 @@ -3793,25 +3793,25 @@ // CHECK7: omp.body.continue20: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC21:%.*]] // CHECK7: omp.inner.for.inc21: -// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ADD22:%.*]] = add nsw i32 [[TMP24]], 1 // CHECK7-NEXT: store i32 [[ADD22]], ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK7: omp.inner.for.end23: // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK7-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK7-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: -// CHECK7-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK7-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CONV24:%.*]] = sext i8 [[TMP29]] to i32 -// CHECK7-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK7-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CONV25:%.*]] = sext i8 [[TMP30]] to i32 // CHECK7-NEXT: [[SUB26:%.*]] = sub i32 10, 
[[CONV25]] // CHECK7-NEXT: [[SUB27:%.*]] = sub i32 [[SUB26]], 1 @@ -3845,39 +3845,39 @@ // CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4 // CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK7-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK7-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF9]] // CHECK7-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: store i32 [[TMP1]], ptr [[TMP2]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK7-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK7-NEXT: store ptr null, ptr [[TMP6]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK7-NEXT: store i32 [[TMP1]], ptr [[TMP3]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK7-NEXT: store ptr null, ptr [[TMP4]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK7-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK7-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK7-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK7-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4 -// CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK7-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4 -// CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK7-NEXT: store ptr @.offload_sizes.11, ptr [[TMP13]], align 4 -// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK7-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP14]], align 4 -// CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK7-NEXT: store ptr null, ptr [[TMP15]], align 4 -// CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK7-NEXT: store ptr null, ptr [[TMP16]], align 4 -// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 
0, i32 8 -// CHECK7-NEXT: store i64 100, ptr [[TMP17]], align 8 -// CHECK7-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.region_id, ptr [[KERNEL_ARGS]]) -// CHECK7-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK7-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK7-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK7-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK7-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 +// CHECK7-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK7-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 +// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK7-NEXT: store ptr @.offload_sizes.11, ptr [[TMP11]], align 4 +// CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK7-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP12]], align 4 +// CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK7-NEXT: store ptr null, ptr [[TMP13]], align 4 +// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK7-NEXT: store ptr null, ptr [[TMP14]], align 4 +// CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK7-NEXT: store i64 100, ptr [[TMP15]], align 8 +// CHECK7-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135.region_id, ptr [[KERNEL_ARGS]]) +// CHECK7-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK7-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK7: omp_offload.failed: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_v_l135(i32 [[TMP1]]) #[[ATTR3]] // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -3915,65 +3915,65 @@ // CHECK7-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2 +// CHECK7-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]]) // CHECK7-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK7: omp.dispatch.cond: -// 
CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK7: omp.dispatch.body: // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !27 -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !27 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK7-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !27 +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !27 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !27 +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK7-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !27 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label 
[[OMP_DISPATCH_INC:%.*]] // CHECK7: omp.dispatch.inc: -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] // CHECK7-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4 -// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] // CHECK7-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4 // CHECK7-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK7: omp.dispatch.end: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK7-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK7-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -4008,51 +4008,51 @@ // CHECK9-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK9-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP1]], i64 16) ] // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 7 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 -// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group !2 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK9-NEXT: 
[[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i64 [[IDXPROM]] -// CHECK9-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !2 -// CHECK9-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group !2 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 +// CHECK9-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP9]] to i64 // CHECK9-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[IDXPROM1]] -// CHECK9-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group !2 +// CHECK9-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL3:%.*]] = fmul float [[TMP7]], [[TMP10]] -// CHECK9-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !llvm.access.group !2 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 +// CHECK9-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP12]] to i64 // CHECK9-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[IDXPROM4]] -// CHECK9-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group !2 +// CHECK9-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP13]] -// CHECK9-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group !2 -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 +// CHECK9-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP15]] to i64 // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM7]] -// CHECK9-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4, !llvm.access.group !2 +// CHECK9-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK9-NEXT: store i32 [[ADD9]], ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: store i32 32000001, ptr [[I]], align 4 // CHECK9-NEXT: ret void @@ -4076,38 +4076,38 @@ // CHECK9-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 7 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK9-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[IDXPROM]] -// CHECK9-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP8]] to i64 // CHECK9-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 [[IDXPROM1]] -// CHECK9-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL3:%.*]] = fmul float [[TMP6]], [[TMP9]] -// CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP11]] to i64 // CHECK9-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[IDXPROM4]] -// CHECK9-NEXT: [[TMP12:%.*]] = 
load float, ptr [[ARRAYIDX5]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP12]] -// CHECK9-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP14]] to i64 // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM7]] // CHECK9-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4 @@ -4115,10 +4115,10 @@ // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1 // CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: store i32 32, ptr [[I]], align 4 // CHECK9-NEXT: ret void @@ -4142,49 +4142,49 @@ // CHECK9-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP:%.*]] = icmp ule i32 [[TMP1]], [[TMP2]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL:%.*]] = mul i32 [[TMP3]], 127 // CHECK9-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP5]] to i64 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr 
inbounds float, ptr [[TMP4]], i64 [[IDXPROM]] -// CHECK9-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP8]] to i64 // CHECK9-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 [[IDXPROM1]] -// CHECK9-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL3:%.*]] = fmul float [[TMP6]], [[TMP9]] -// CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[IDXPROM4:%.*]] = zext i32 [[TMP11]] to i64 // CHECK9-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[IDXPROM4]] -// CHECK9-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP12]] -// CHECK9-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[IDXPROM7:%.*]] = zext i32 [[TMP14]] to i64 // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM7]] -// CHECK9-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD9:%.*]] = add i32 [[TMP15]], 1 -// CHECK9-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK9: 
omp.inner.for.end: // CHECK9-NEXT: store i32 -2147483522, ptr [[I]], align 4 // CHECK9-NEXT: ret void @@ -4206,9 +4206,9 @@ // CHECK9-NEXT: [[I6:%.*]] = alloca i8, align 1 // CHECK9-NEXT: [[I7:%.*]] = alloca i8, align 1 // CHECK9-NEXT: store i8 0, ptr [[A]], align 1 -// CHECK9-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1 +// CHECK9-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i8 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK9-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK9-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32 // CHECK9-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK9-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -4217,45 +4217,45 @@ // CHECK9-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK9-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i8 [[TMP3]], ptr [[I4]], align 1 -// CHECK9-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK9-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV5:%.*]] = sext i8 [[TMP4]] to i32 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10 // CHECK9-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK9: simd.if.then: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i8, ptr [[I]], align 1 +// CHECK9-NEXT: [[TMP6:%.*]] = load i8, ptr [[I]], align 1, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i8 [[TMP6]], ptr [[DOTLINEAR_START]], align 1 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !12 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !12 +// CHECK9-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV9:%.*]] = sext i8 [[TMP9]] to i32 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // 
CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK9-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]] // CHECK9-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8 -// CHECK9-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group !12 +// CHECK9-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group [[ACC_GRP13]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK9-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK9: omp.inner.for.end: -// CHECK9-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK9-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV13:%.*]] = sext i8 [[TMP12]] to i32 -// CHECK9-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK9-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV14:%.*]] = sext i8 [[TMP13]] to i32 // CHECK9-NEXT: [[SUB15:%.*]] = sub i32 10, [[CONV14]] // CHECK9-NEXT: [[SUB16:%.*]] = sub i32 [[SUB15]], 1 @@ -4289,27 +4289,27 @@ // CHECK9-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !15 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !15 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP4:%.*]] 
= load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK9-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: store i32 100, ptr [[I]], align 4 // CHECK9-NEXT: ret i32 0 @@ -4333,47 +4333,47 @@ // CHECK11-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP1]], i32 16) ] // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !3 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 7 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !3 -// CHECK11-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !llvm.access.group !3 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK11-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i32 [[TMP6]] -// CHECK11-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !3 -// CHECK11-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !llvm.access.group !3 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 +// CHECK11-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP8:%.*]] = load ptr, ptr 
[[C_ADDR]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 [[TMP9]] -// CHECK11-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !llvm.access.group !3 +// CHECK11-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL2:%.*]] = fmul float [[TMP7]], [[TMP10]] -// CHECK11-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !llvm.access.group !3 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 +// CHECK11-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i32 [[TMP12]] -// CHECK11-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group !3 +// CHECK11-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL4:%.*]] = fmul float [[MUL2]], [[TMP13]] -// CHECK11-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !llvm.access.group !3 -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 +// CHECK11-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP15]] -// CHECK11-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4, !llvm.access.group !3 +// CHECK11-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK11-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: store i32 32000001, ptr [[I]], align 4 // CHECK11-NEXT: ret void @@ -4397,45 +4397,45 @@ // CHECK11-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 7 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK11-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 [[TMP5]] -// CHECK11-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 [[TMP8]] -// CHECK11-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL2:%.*]] = fmul float [[TMP6]], [[TMP9]] -// CHECK11-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i32 [[TMP11]] -// CHECK11-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL4:%.*]] = fmul float [[MUL2]], [[TMP12]] -// CHECK11-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]] // CHECK11-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1 // CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// 
CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: store i32 32, ptr [[I]], align 4 // CHECK11-NEXT: ret void @@ -4459,45 +4459,45 @@ // CHECK11-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP:%.*]] = icmp ule i32 [[TMP1]], [[TMP2]] // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL:%.*]] = mul i32 [[TMP3]], 127 // CHECK11-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 [[TMP5]] -// CHECK11-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 [[TMP8]] -// CHECK11-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL2:%.*]] = fmul float [[TMP6]], [[TMP9]] -// CHECK11-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !llvm.access.group !10 -// 
CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i32 [[TMP11]] -// CHECK11-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL4:%.*]] = fmul float [[MUL2]], [[TMP12]] -// CHECK11-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]] -// CHECK11-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD6:%.*]] = add i32 [[TMP15]], 1 -// CHECK11-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: store i32 -2147483522, ptr [[I]], align 4 // CHECK11-NEXT: ret void @@ -4519,9 +4519,9 @@ // CHECK11-NEXT: [[I6:%.*]] = alloca i8, align 1 // CHECK11-NEXT: [[I7:%.*]] = alloca i8, align 1 // CHECK11-NEXT: store i8 0, ptr [[A]], align 1 -// CHECK11-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1 +// CHECK11-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i8 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK11-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK11-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32 // CHECK11-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK11-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -4530,45 +4530,45 @@ // CHECK11-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i8, ptr 
[[DOTCAPTURE_EXPR_]], align 1 +// CHECK11-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i8 [[TMP3]], ptr [[I4]], align 1 -// CHECK11-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK11-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV5:%.*]] = sext i8 [[TMP4]] to i32 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10 // CHECK11-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK11: simd.if.then: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i8, ptr [[I]], align 1 +// CHECK11-NEXT: [[TMP6:%.*]] = load i8, ptr [[I]], align 1, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i8 [[TMP6]], ptr [[DOTLINEAR_START]], align 1 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !13 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK11-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !13 +// CHECK11-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV9:%.*]] = sext i8 [[TMP9]] to i32 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK11-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]] // CHECK11-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8 -// CHECK11-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group !13 +// CHECK11-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group [[ACC_GRP14]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK11-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK11: omp.inner.for.end: -// CHECK11-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK11-NEXT: 
[[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV13:%.*]] = sext i8 [[TMP12]] to i32 -// CHECK11-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK11-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV14:%.*]] = sext i8 [[TMP13]] to i32 // CHECK11-NEXT: [[SUB15:%.*]] = sub i32 10, [[CONV14]] // CHECK11-NEXT: [[SUB16:%.*]] = sub i32 [[SUB15]], 1 @@ -4602,27 +4602,27 @@ // CHECK11-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !16 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !16 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !16 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !16 +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !16 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK11-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !16 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: store i32 100, ptr [[I]], align 4 // CHECK11-NEXT: ret i32 0 @@ -4646,51 +4646,51 @@ // CHECK13-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK13-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 +// CHECK13-NEXT: 
[[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP1]], i64 16) ] // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 7 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i64 [[IDXPROM]] -// CHECK13-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP9]] to i64 // CHECK13-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[IDXPROM1]] -// CHECK13-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL3:%.*]] = fmul float [[TMP7]], [[TMP10]] -// CHECK13-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP12]] to i64 // CHECK13-NEXT: [[ARRAYIDX5:%.*]] = 
getelementptr inbounds float, ptr [[TMP11]], i64 [[IDXPROM4]] -// CHECK13-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP13]] -// CHECK13-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP15]] to i64 // CHECK13-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM7]] -// CHECK13-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 32000001, ptr [[I]], align 4 // CHECK13-NEXT: ret void @@ -4714,38 +4714,38 @@ // CHECK13-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 7 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK13-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK13-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nontemporal !7 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4 +// CHECK13-NEXT: 
[[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !nontemporal !8, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[IDXPROM]] -// CHECK13-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4 +// CHECK13-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP8]] to i64 // CHECK13-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 [[IDXPROM1]] -// CHECK13-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK13-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL3:%.*]] = fmul float [[TMP6]], [[TMP9]] -// CHECK13-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4 +// CHECK13-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP11]] to i64 // CHECK13-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[IDXPROM4]] -// CHECK13-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX5]], align 4 +// CHECK13-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP12]] -// CHECK13-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nontemporal !7 -// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4 +// CHECK13-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !nontemporal !8, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP14]] to i64 // CHECK13-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM7]] // CHECK13-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4 @@ -4753,10 +4753,10 @@ // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1 // CHECK13-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 32, ptr [[I]], align 4 // CHECK13-NEXT: ret void @@ -4780,49 +4780,49 @@ // CHECK13-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP0]], ptr 
[[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !10 +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp ule i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul i32 [[TMP3]], 127 // CHECK13-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !10 -// CHECK13-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group !10 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK13-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP5]] to i64 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[IDXPROM]] -// CHECK13-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !10 -// CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group !10 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK13-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP8]] to i64 // CHECK13-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 [[IDXPROM1]] -// CHECK13-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group !10 +// CHECK13-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL3:%.*]] = fmul float [[TMP6]], [[TMP9]] -// CHECK13-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !llvm.access.group !10 -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK13-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM4:%.*]] = zext i32 [[TMP11]] to i64 // CHECK13-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[IDXPROM4]] -// CHECK13-NEXT: [[TMP12:%.*]] = load float, ptr 
[[ARRAYIDX5]], align 4, !llvm.access.group !10 +// CHECK13-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP12]] -// CHECK13-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group !10 -// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK13-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM7:%.*]] = zext i32 [[TMP14]] to i64 // CHECK13-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM7]] -// CHECK13-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4, !llvm.access.group !10 +// CHECK13-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD9:%.*]] = add i32 [[TMP15]], 1 -// CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 -2147483522, ptr [[I]], align 4 // CHECK13-NEXT: ret void @@ -4844,9 +4844,9 @@ // CHECK13-NEXT: [[I6:%.*]] = alloca i8, align 1 // CHECK13-NEXT: [[I7:%.*]] = alloca i8, align 1 // CHECK13-NEXT: store i8 0, ptr [[A]], align 1 -// CHECK13-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1 +// CHECK13-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i8 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK13-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK13-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32 // CHECK13-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK13-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -4855,58 +4855,58 @@ // CHECK13-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK13-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK13-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i8 [[TMP3]], ptr [[I4]], align 1 -// CHECK13-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK13-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV5:%.*]] = sext i8 [[TMP4]] to 
i32 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10 // CHECK13-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK13: simd.if.then: -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK13-NEXT: [[TMP6:%.*]] = load i8, ptr [[I]], align 1 +// CHECK13-NEXT: [[TMP6:%.*]] = load i8, ptr [[I]], align 1, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i8 [[TMP6]], ptr [[DOTLINEAR_START]], align 1 -// CHECK13-NEXT: [[TMP7:%.*]] = load i8, ptr [[A]], align 1 +// CHECK13-NEXT: [[TMP7:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP7]], 0 // CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK13: omp_if.then: // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 -// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !13 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK13-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !13 +// CHECK13-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV9:%.*]] = sext i8 [[TMP10]] to i32 -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK13-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]] // CHECK13-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8 -// CHECK13-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal !7, !llvm.access.group !13 +// CHECK13-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal !8, !llvm.access.group [[ACC_GRP14]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 +// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK13-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: br label [[OMP_IF_END:%.*]] // CHECK13: omp_if.else: // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]] // CHECK13: omp.inner.for.cond13: 
-// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP14:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK13-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END23:%.*]] // CHECK13: omp.inner.for.body15: -// CHECK13-NEXT: [[TMP15:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK13-NEXT: [[TMP15:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV16:%.*]] = sext i8 [[TMP15]] to i32 -// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL17:%.*]] = mul nsw i32 [[TMP16]], 1 // CHECK13-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV16]], [[MUL17]] // CHECK13-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8 @@ -4915,16 +4915,16 @@ // CHECK13: omp.body.continue20: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC21:%.*]] // CHECK13: omp.inner.for.inc21: -// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD22:%.*]] = add nsw i32 [[TMP17]], 1 // CHECK13-NEXT: store i32 [[ADD22]], ptr [[DOTOMP_IV]], align 4 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK13: omp.inner.for.end23: // CHECK13-NEXT: br label [[OMP_IF_END]] // CHECK13: omp_if.end: -// CHECK13-NEXT: [[TMP18:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK13-NEXT: [[TMP18:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV24:%.*]] = sext i8 [[TMP18]] to i32 -// CHECK13-NEXT: [[TMP19:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK13-NEXT: [[TMP19:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV25:%.*]] = sext i8 [[TMP19]] to i32 // CHECK13-NEXT: [[SUB26:%.*]] = sub i32 10, [[CONV25]] // CHECK13-NEXT: [[SUB27:%.*]] = sub i32 [[SUB26]], 1 @@ -4958,27 +4958,27 @@ // CHECK13-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !18 +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// 
CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !18 +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK13-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 100, ptr [[I]], align 4 // CHECK13-NEXT: ret i32 0 @@ -5002,47 +5002,47 @@ // CHECK15-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4 +// CHECK15-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP1]], i32 16) ] // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 7 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 4, 
!llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i32 [[TMP6]] -// CHECK15-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 [[TMP9]] -// CHECK15-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL2:%.*]] = fmul float [[TMP7]], [[TMP10]] -// CHECK15-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i32 [[TMP12]] -// CHECK15-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL4:%.*]] = fmul float [[MUL2]], [[TMP13]] -// CHECK15-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP15]] -// CHECK15-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK15-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: br label 
[[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 32000001, ptr [[I]], align 4 // CHECK15-NEXT: ret void @@ -5066,45 +5066,45 @@ // CHECK15-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 7 // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK15-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK15-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nontemporal !8 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4 +// CHECK15-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !nontemporal !9, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 [[TMP5]] -// CHECK15-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK15-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4 +// CHECK15-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 [[TMP8]] -// CHECK15-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +// CHECK15-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL2:%.*]] = fmul float [[TMP6]], [[TMP9]] -// CHECK15-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4 +// CHECK15-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i32 [[TMP11]] -// CHECK15-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +// CHECK15-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL4:%.*]] = fmul float [[MUL2]], [[TMP12]] -// CHECK15-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nontemporal !8 -// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4 +// 
CHECK15-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !nontemporal !9, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]] // CHECK15-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1 // CHECK15-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 32, ptr [[I]], align 4 // CHECK15-NEXT: ret void @@ -5128,45 +5128,45 @@ // CHECK15-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 16908288, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !11 +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp ule i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL:%.*]] = mul i32 [[TMP3]], 127 // CHECK15-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !11 -// CHECK15-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !llvm.access.group !11 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK15-NEXT: [[TMP4:%.*]] = load ptr, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 [[TMP5]] -// CHECK15-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !11 -// CHECK15-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !llvm.access.group !11 -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 +// CHECK15-NEXT: [[TMP6:%.*]] = load 
float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP7:%.*]] = load ptr, ptr [[C_ADDR]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 [[TMP8]] -// CHECK15-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !llvm.access.group !11 +// CHECK15-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL2:%.*]] = fmul float [[TMP6]], [[TMP9]] -// CHECK15-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !llvm.access.group !11 -// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 +// CHECK15-NEXT: [[TMP10:%.*]] = load ptr, ptr [[D_ADDR]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i32 [[TMP11]] -// CHECK15-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group !11 +// CHECK15-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL4:%.*]] = fmul float [[MUL2]], [[TMP12]] -// CHECK15-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !llvm.access.group !11 -// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 +// CHECK15-NEXT: [[TMP13:%.*]] = load ptr, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]] -// CHECK15-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4, !llvm.access.group !11 +// CHECK15-NEXT: store float [[MUL4]], ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD6:%.*]] = add i32 [[TMP15]], 1 -// CHECK15-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 -2147483522, ptr [[I]], align 4 // CHECK15-NEXT: ret void @@ -5188,9 +5188,9 @@ // CHECK15-NEXT: [[I6:%.*]] = alloca i8, align 1 // CHECK15-NEXT: [[I7:%.*]] = alloca i8, align 1 // CHECK15-NEXT: store i8 0, ptr [[A]], align 1 -// CHECK15-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1 +// CHECK15-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i8 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK15-NEXT: 
[[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK15-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV:%.*]] = sext i8 [[TMP1]] to i32 // CHECK15-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK15-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -5199,58 +5199,58 @@ // CHECK15-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK15-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK15-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i8 [[TMP3]], ptr [[I4]], align 1 -// CHECK15-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK15-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV5:%.*]] = sext i8 [[TMP4]] to i32 // CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10 // CHECK15-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK15: simd.if.then: -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK15-NEXT: [[TMP6:%.*]] = load i8, ptr [[I]], align 1 +// CHECK15-NEXT: [[TMP6:%.*]] = load i8, ptr [[I]], align 1, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i8 [[TMP6]], ptr [[DOTLINEAR_START]], align 1 -// CHECK15-NEXT: [[TMP7:%.*]] = load i8, ptr [[A]], align 1 +// CHECK15-NEXT: [[TMP7:%.*]] = load i8, ptr [[A]], align 1, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP7]], 0 // CHECK15-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK15: omp_if.then: // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !14 +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK15-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !14 +// CHECK15-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV9:%.*]] = sext i8 [[TMP10]] to i32 -// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 +// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK15-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]] // CHECK15-NEXT: [[CONV11:%.*]] 
= trunc i32 [[ADD10]] to i8 -// CHECK15-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal !8, !llvm.access.group !14 +// CHECK15-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal !9, !llvm.access.group [[ACC_GRP15]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 +// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK15-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: br label [[OMP_IF_END:%.*]] // CHECK15: omp_if.else: // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]] // CHECK15: omp.inner.for.cond13: -// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP14:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK15-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END23:%.*]] // CHECK15: omp.inner.for.body15: -// CHECK15-NEXT: [[TMP15:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK15-NEXT: [[TMP15:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV16:%.*]] = sext i8 [[TMP15]] to i32 -// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL17:%.*]] = mul nsw i32 [[TMP16]], 1 // CHECK15-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV16]], [[MUL17]] // CHECK15-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8 @@ -5259,16 +5259,16 @@ // CHECK15: omp.body.continue20: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC21:%.*]] // CHECK15: omp.inner.for.inc21: -// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD22:%.*]] = add nsw i32 [[TMP17]], 1 // CHECK15-NEXT: store i32 [[ADD22]], ptr [[DOTOMP_IV]], align 4 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP17:![0-9]+]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP18:![0-9]+]] // CHECK15: omp.inner.for.end23: // CHECK15-NEXT: br label [[OMP_IF_END]] // CHECK15: omp_if.end: -// CHECK15-NEXT: [[TMP18:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK15-NEXT: [[TMP18:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV24:%.*]] = sext i8 [[TMP18]] to i32 -// CHECK15-NEXT: [[TMP19:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK15-NEXT: [[TMP19:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV25:%.*]] = sext i8 [[TMP19]] to i32 // CHECK15-NEXT: [[SUB26:%.*]] = sub 
i32 10, [[CONV25]] // CHECK15-NEXT: [[SUB27:%.*]] = sub i32 [[SUB26]], 1 @@ -5302,27 +5302,27 @@ // CHECK15-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !19 +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !19 +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK15-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 100, ptr [[I]], align 4 // CHECK15-NEXT: ret i32 0 @@ -5369,74 +5369,74 @@ // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8, !noundef [[NOUNDEF9:![0-9]+]] // CHECK17-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i64 16) ] // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK17-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef 
[[NOUNDEF9]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] -// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 -// CHECK17-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group !9 -// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK17-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM]] -// CHECK17-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !9 -// CHECK17-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group !9 -// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK17-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] 
+// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP17]] to i64 // CHECK17-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[IDXPROM2]] -// CHECK17-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group !9 +// CHECK17-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL4:%.*]] = fmul float [[TMP15]], [[TMP18]] -// CHECK17-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group !9 -// CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK17-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP20]] to i64 // CHECK17-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i64 [[IDXPROM5]] -// CHECK17-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !llvm.access.group !9 +// CHECK17-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP21]] -// CHECK17-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group !9 -// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK17-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP23]] to i64 // CHECK17-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 [[IDXPROM8]] -// CHECK17-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group !9 +// CHECK17-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK17-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]]) -// CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK17-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: 
@@ -5492,51 +5492,51 @@ // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK17-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK17-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8 -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK17-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM]] -// CHECK17-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK17-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8 -// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK17-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64 // CHECK17-NEXT: [[ARRAYIDX3:%.*]] 
= getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM2]] -// CHECK17-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +// CHECK17-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK17-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8 -// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK17-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64 // CHECK17-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM5]] -// CHECK17-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4 +// CHECK17-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]] -// CHECK17-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8 -// CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK17-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64 // CHECK17-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM8]] // CHECK17-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4 @@ -5544,15 +5544,15 @@ // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK17-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 // CHECK17-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -5608,84 +5608,84 @@ // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5) // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK17: omp.dispatch.cond: -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288 // CHECK17-NEXT: 
br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK17: omp.dispatch.body: // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !18 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]] // CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127 // CHECK17-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !18 -// CHECK17-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group !18 -// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK17-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM]] -// CHECK17-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !18 -// CHECK17-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group !18 -// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK17-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP17:%.*]] = load ptr, ptr 
[[TMP2]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64 // CHECK17-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM3]] -// CHECK17-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group !18 +// CHECK17-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]] -// CHECK17-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group !18 -// CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK17-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64 // CHECK17-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM6]] -// CHECK17-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !18 +// CHECK17-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]] -// CHECK17-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group !18 -// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK17-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64 // CHECK17-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM9]] -// CHECK17-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group !18 +// CHECK17-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1 -// CHECK17-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK17: omp.dispatch.inc: -// CHECK17-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK17-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // 
CHECK17-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]] // CHECK17-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK17-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]] // CHECK17-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK17: omp.dispatch.end: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK17-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0 // CHECK17-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -5729,9 +5729,9 @@ // CHECK17-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1 +// CHECK17-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK17-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK17-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32 // CHECK17-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK17-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -5739,71 +5739,71 @@ // CHECK17-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK17-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK17-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK17-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK17-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1 -// CHECK17-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK17-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32 // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK17: omp.precond.then: // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], 
i32 1, i32 1) -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK17-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !21 +// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK17-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP16:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !21 +// CHECK17-NEXT: [[TMP16:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CONV9:%.*]] = sext i8 [[TMP16]] to i32 -// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 +// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1 // CHECK17-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]] // CHECK17-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8 -// CHECK17-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group !21 +// CHECK17-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group [[ACC_GRP22]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 +// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK17-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// 
CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4 +// CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP20]]) -// CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 // CHECK17-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: -// CHECK17-NEXT: [[TMP23:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK17-NEXT: [[TMP23:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CONV13:%.*]] = sext i8 [[TMP23]] to i32 -// CHECK17-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK17-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CONV14:%.*]] = sext i8 [[TMP24]] to i32 // CHECK17-NEXT: [[SUB15:%.*]] = sub i32 10, [[CONV14]] // CHECK17-NEXT: [[SUB16:%.*]] = sub i32 [[SUB15]], 1 @@ -5850,65 +5850,65 @@ // CHECK17-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK17-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2 +// CHECK17-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]]) // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK17: omp.dispatch.cond: -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 
-// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK17: omp.dispatch.body: // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !24 +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !24 +// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK17-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK17: omp.dispatch.inc: -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] // CHECK17-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] // CHECK17-NEXT: store 
i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK17: omp.dispatch.end: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK17-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK17-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -5959,70 +5959,70 @@ // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10:![0-9]+]] // CHECK19-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i32 16) ] // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK19-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !10 +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// 
CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] -// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !10 -// CHECK19-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group !10 -// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK19-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]] -// CHECK19-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !10 -// CHECK19-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group !10 -// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK19-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 [[TMP17]] -// CHECK19-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group !10 +// CHECK19-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL3:%.*]] = fmul float [[TMP15]], [[TMP18]] -// CHECK19-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group !10 -// CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK19-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i32 [[TMP20]] -// CHECK19-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group !10 +// CHECK19-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP21]] -// CHECK19-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group !10 -// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK19-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i32 [[TMP23]] -// CHECK19-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4, !llvm.access.group !10 +// CHECK19-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: 
omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK19-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]]) -// CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK19-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -6078,63 +6078,63 @@ // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// 
CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK19-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK19-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4 -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK19-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 [[TMP13]] -// CHECK19-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK19-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4 -// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK19-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP16]] -// CHECK19-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK19-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL3:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK19-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4 -// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK19-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[TMP19]] -// CHECK19-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP20]] -// CHECK19-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4 -// CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// CHECK19-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP22]] // CHECK19-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK19-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // 
CHECK19-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 // CHECK19-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -6190,80 +6190,80 @@ // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5) // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK19: omp.dispatch.cond: -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK19: omp.dispatch.body: // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !19 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]] // CHECK19-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127 // CHECK19-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !19 -// CHECK19-NEXT: 
[[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group !19 -// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !19 +// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK19-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP15]] -// CHECK19-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !19 -// CHECK19-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group !19 -// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !19 +// CHECK19-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 [[TMP18]] -// CHECK19-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group !19 +// CHECK19-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL4:%.*]] = fmul float [[TMP16]], [[TMP19]] -// CHECK19-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group !19 -// CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !19 +// CHECK19-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP21]] -// CHECK19-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group !19 +// CHECK19-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL6:%.*]] = fmul float [[MUL4]], [[TMP22]] -// CHECK19-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group !19 -// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !19 +// CHECK19-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[TMP24]] -// CHECK19-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group !19 +// CHECK19-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 +// CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: 
[[ADD8:%.*]] = add i32 [[TMP25]], 1 -// CHECK19-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK19: omp.dispatch.inc: -// CHECK19-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK19-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD9:%.*]] = add i32 [[TMP26]], [[TMP27]] // CHECK19-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK19-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD10:%.*]] = add i32 [[TMP28]], [[TMP29]] // CHECK19-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK19: omp.dispatch.end: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK19-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0 // CHECK19-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -6307,9 +6307,9 @@ // CHECK19-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1 +// CHECK19-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK19-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK19-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32 // CHECK19-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK19-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -6317,71 +6317,71 @@ // CHECK19-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK19-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK19-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK19-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1 -// CHECK19-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK19-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32 // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK19: 
omp.precond.then: // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK19-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !22 -// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !22 +// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK19-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP16:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !22 +// CHECK19-NEXT: [[TMP16:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CONV9:%.*]] = sext i8 [[TMP16]] to i32 -// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !22 +// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 
[[TMP17]], 1 // CHECK19-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]] // CHECK19-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8 -// CHECK19-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group !22 +// CHECK19-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !llvm.access.group [[ACC_GRP23]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !22 +// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK19-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !22 -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP20]]) -// CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 // CHECK19-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: -// CHECK19-NEXT: [[TMP23:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK19-NEXT: [[TMP23:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CONV13:%.*]] = sext i8 [[TMP23]] to i32 -// CHECK19-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK19-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CONV14:%.*]] = sext i8 [[TMP24]] to i32 // CHECK19-NEXT: [[SUB15:%.*]] = sub i32 10, [[CONV14]] // CHECK19-NEXT: [[SUB16:%.*]] = sub i32 [[SUB15]], 1 @@ -6428,65 +6428,65 @@ // CHECK19-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2 +// CHECK19-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]]) // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK19: omp.dispatch.cond: -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, 
!noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK19: omp.dispatch.body: // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !25 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !25 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK19-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !25 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !25 +// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !25 +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK19-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !25 -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK19: omp.dispatch.inc: -// CHECK19-NEXT: 
[[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] // CHECK19-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] // CHECK19-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK19: omp.dispatch.end: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK19-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK19-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -6537,74 +6537,74 @@ // CHECK21-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 // CHECK21-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK21-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK21-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8 +// CHECK21-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 8, !noundef [[NOUNDEF9:![0-9]+]] // CHECK21-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i64 16) ] // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK21-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: store i32 
[[TMP9]], ptr [[DOTOMP_IV]], align 4 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 +// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]] // CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] -// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 -// CHECK21-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group !9 -// CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK21-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64 // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM]] -// CHECK21-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !9 -// CHECK21-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group !9 -// CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK21-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP17]] to i64 // CHECK21-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[IDXPROM2]] -// CHECK21-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group !9 +// CHECK21-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[MUL4:%.*]] = fmul float [[TMP15]], [[TMP18]] -// CHECK21-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group !9 -// CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK21-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP20]] to i64 // CHECK21-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i64 [[IDXPROM5]] -// CHECK21-NEXT: [[TMP21:%.*]] = 
load float, ptr [[ARRAYIDX6]], align 4, !llvm.access.group !9 +// CHECK21-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP21]] -// CHECK21-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group !9 -// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK21-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP23]] to i64 // CHECK21-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i64 [[IDXPROM8]] -// CHECK21-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group !9 +// CHECK21-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK21-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]]) -// CHECK21-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK21-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -6660,51 +6660,51 @@ // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: 
cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK21-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK21-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK21-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8, !nontemporal !16 -// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK21-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 8, !nontemporal !17, !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM]] -// CHECK21-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK21-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8 -// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK21-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8, !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[IDXPROM2:%.*]] = sext i32 [[TMP16]] to i64 // CHECK21-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM2]] -// CHECK21-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4 +// CHECK21-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[MUL4:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK21-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8 -// CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK21-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP19]] to i64 // CHECK21-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM5]] -// CHECK21-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4 +// CHECK21-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[MUL7:%.*]] = fmul float [[MUL4]], [[TMP20]] -// CHECK21-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8, !nontemporal !16 -// CHECK21-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// 
CHECK21-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 8, !nontemporal !17, !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP22]] to i64 // CHECK21-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM8]] // CHECK21-NEXT: store float [[MUL7]], ptr [[ARRAYIDX9]], align 4 @@ -6712,15 +6712,15 @@ // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK21-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 // CHECK21-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -6776,84 +6776,84 @@ // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5) // CHECK21-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK21: omp.dispatch.cond: -// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // 
CHECK21-NEXT: [[CMP1:%.*]] = icmp ule i32 [[TMP9]], [[TMP10]] // CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK21: omp.dispatch.body: // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 -// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !19 +// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]] // CHECK21-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 +// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127 // CHECK21-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !19 -// CHECK21-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group !19 -// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !19 +// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK21-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 8, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP15]] to i64 // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM]] -// CHECK21-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !19 -// CHECK21-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group !19 -// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !19 +// CHECK21-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 8, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[IDXPROM3:%.*]] = zext i32 [[TMP18]] to i64 // CHECK21-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM3]] -// CHECK21-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group !19 +// CHECK21-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[MUL5:%.*]] = fmul float [[TMP16]], [[TMP19]] -// CHECK21-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group !19 -// CHECK21-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !19 +// CHECK21-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: 
[[IDXPROM6:%.*]] = zext i32 [[TMP21]] to i64 // CHECK21-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM6]] -// CHECK21-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !19 +// CHECK21-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP22]] -// CHECK21-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group !19 -// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !19 +// CHECK21-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 8, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[IDXPROM9:%.*]] = zext i32 [[TMP24]] to i64 // CHECK21-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM9]] -// CHECK21-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group !19 +// CHECK21-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 +// CHECK21-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[ADD11:%.*]] = add i32 [[TMP25]], 1 -// CHECK21-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !19 -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK21: omp.dispatch.inc: -// CHECK21-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK21-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK21-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[ADD12:%.*]] = add i32 [[TMP26]], [[TMP27]] // CHECK21-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_LB]], align 4 -// CHECK21-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK21-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[ADD13:%.*]] = add i32 [[TMP28]], [[TMP29]] // CHECK21-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_UB]], align 4 // CHECK21-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK21: omp.dispatch.end: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK21-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0 // CHECK21-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: 
.omp.final.then: @@ -6897,9 +6897,9 @@ // CHECK21-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 // CHECK21-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK21-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1 +// CHECK21-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1, !noundef [[NOUNDEF9]] // CHECK21-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK21-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK21-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32 // CHECK21-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK21-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -6907,75 +6907,75 @@ // CHECK21-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK21-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK21-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK21-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK21-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK21-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1 -// CHECK21-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK21-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32 // CHECK21-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10 // CHECK21-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK21: omp.precond.then: // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK21-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: -// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], 
[[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP1]], align 1 +// CHECK21-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP1]], align 1, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP14]], 0 // CHECK21-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK21: omp_if.then: // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !22 -// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !22 +// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] // CHECK21-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !22 +// CHECK21-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CONV9:%.*]] = sext i8 [[TMP17]] to i32 -// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !22 +// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK21-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]] // CHECK21-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8 -// CHECK21-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal !16, !llvm.access.group !22 +// CHECK21-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal !17, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !22 +// CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK21-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !22 -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_IF_END:%.*]] // CHECK21: omp_if.else: // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]] // CHECK21: omp.inner.for.cond13: -// CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP21:%.*]] = load i32, ptr 
[[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CMP14:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] // CHECK21-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END23:%.*]] // CHECK21: omp.inner.for.body15: -// CHECK21-NEXT: [[TMP22:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK21-NEXT: [[TMP22:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CONV16:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[MUL17:%.*]] = mul nsw i32 [[TMP23]], 1 // CHECK21-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV16]], [[MUL17]] // CHECK21-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8 @@ -6984,25 +6984,25 @@ // CHECK21: omp.body.continue20: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC21:%.*]] // CHECK21: omp.inner.for.inc21: -// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[ADD22:%.*]] = add nsw i32 [[TMP24]], 1 // CHECK21-NEXT: store i32 [[ADD22]], ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK21: omp.inner.for.end23: // CHECK21-NEXT: br label [[OMP_IF_END]] // CHECK21: omp_if.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK21-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK21-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK21-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: -// CHECK21-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK21-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CONV24:%.*]] = sext i8 [[TMP29]] to i32 -// CHECK21-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK21-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CONV25:%.*]] = sext i8 [[TMP30]] to i32 // CHECK21-NEXT: [[SUB26:%.*]] = sub i32 10, [[CONV25]] // CHECK21-NEXT: [[SUB27:%.*]] = sub i32 [[SUB26]], 1 @@ -7049,65 +7049,65 @@ // CHECK21-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK21-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2 +// CHECK21-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK21-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]]) // CHECK21-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK21: omp.dispatch.cond: -// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK21: omp.dispatch.body: // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !27 -// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !27 +// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK21-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !27 +// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !27 +// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !27 +// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK21-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !27 -// 
CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK21: omp.dispatch.inc: -// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] // CHECK21-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4 -// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] +// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] // CHECK21-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4 // CHECK21-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK21: omp.dispatch.end: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK21-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK21-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -7158,70 +7158,70 @@ // CHECK23-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4 // CHECK23-NEXT: [[TMP2:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK23-NEXT: [[TMP3:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10:![0-9]+]] // CHECK23-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP4]], i32 16) ] // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK23-NEXT: store i32 4571423, ptr [[DOTOMP_UB]], align 4 // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 4571423 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: br label 
[[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !10 +// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 7 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 33, [[MUL]] -// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !10 -// CHECK23-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group !10 -// CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK23-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP14]] -// CHECK23-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !10 -// CHECK23-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group !10 -// CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK23-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 [[TMP17]] -// CHECK23-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group !10 +// CHECK23-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[MUL3:%.*]] = fmul float [[TMP15]], [[TMP18]] -// CHECK23-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group !10 -// CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK23-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP20:%.*]] = 
load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP19]], i32 [[TMP20]] -// CHECK23-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group !10 +// CHECK23-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP21]] -// CHECK23-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group !10 -// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 +// CHECK23-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP22]], i32 [[TMP23]] -// CHECK23-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4, !llvm.access.group !10 +// CHECK23-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK23-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP6]]) -// CHECK23-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK23-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: @@ -7277,63 +7277,63 @@ // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 4571423 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 4571423, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 7 // CHECK23-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK23-NEXT: store i32 [[SUB]], ptr [[I]], align 4 -// CHECK23-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4, !nontemporal !17 -// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK23-NEXT: [[TMP12:%.*]] = load ptr, ptr [[TMP1]], align 4, !nontemporal !18, !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 [[TMP13]] -// CHECK23-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK23-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4 -// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK23-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 4, !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i32 [[TMP16]] -// CHECK23-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK23-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[MUL3:%.*]] = fmul float [[TMP14]], [[TMP17]] -// CHECK23-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4 -// CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4 +// CHECK23-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 4, !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 [[TMP19]] -// CHECK23-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +// CHECK23-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP20]] -// CHECK23-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4, !nontemporal !17 -// CHECK23-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4 +// 
CHECK23-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP0]], align 4, !nontemporal !18, !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP22:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i32 [[TMP22]] // CHECK23-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK23-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 // CHECK23-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: @@ -7389,80 +7389,80 @@ // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP5]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 5) // CHECK23-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK23: omp.dispatch.cond: -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP6]], 16908288 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 16908288, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp ule 
i32 [[TMP9]], [[TMP10]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK23: omp.dispatch.body: // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !20 -// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !20 +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CMP2:%.*]] = icmp ule i32 [[TMP11]], [[TMP12]] // CHECK23-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !20 +// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[MUL:%.*]] = mul i32 [[TMP13]], 127 // CHECK23-NEXT: [[ADD:%.*]] = add i32 131071, [[MUL]] -// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !20 -// CHECK23-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group !20 -// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !20 +// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK23-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP1]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 [[TMP15]] -// CHECK23-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group !20 -// CHECK23-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group !20 -// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !20 +// CHECK23-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i32 [[TMP18]] -// CHECK23-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group !20 +// CHECK23-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX3]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[MUL4:%.*]] = fmul float [[TMP16]], [[TMP19]] -// CHECK23-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group !20 -// CHECK23-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !20 +// CHECK23-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP21:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP21]] -// CHECK23-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, 
!llvm.access.group !20 +// CHECK23-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[MUL6:%.*]] = fmul float [[MUL4]], [[TMP22]] -// CHECK23-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group !20 -// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !20 +// CHECK23-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP0]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i32 [[TMP24]] -// CHECK23-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group !20 +// CHECK23-NEXT: store float [[MUL6]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !20 +// CHECK23-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ADD8:%.*]] = add i32 [[TMP25]], 1 -// CHECK23-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !20 -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK23: omp.dispatch.inc: -// CHECK23-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK23-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK23-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ADD9:%.*]] = add i32 [[TMP26]], [[TMP27]] // CHECK23-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_LB]], align 4 -// CHECK23-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK23-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ADD10:%.*]] = add i32 [[TMP28]], [[TMP29]] // CHECK23-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_UB]], align 4 // CHECK23-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK23: omp.dispatch.end: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK23-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0 // CHECK23-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: @@ -7506,9 +7506,9 @@ // CHECK23-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], 
align 1 +// CHECK23-NEXT: [[TMP2:%.*]] = load i8, ptr [[TMP1]], align 1, !noundef [[NOUNDEF10]] // CHECK23-NEXT: store i8 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK23-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK23-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CONV:%.*]] = sext i8 [[TMP3]] to i32 // CHECK23-NEXT: [[SUB:%.*]] = sub i32 10, [[CONV]] // CHECK23-NEXT: [[SUB2:%.*]] = sub i32 [[SUB]], 1 @@ -7516,75 +7516,75 @@ // CHECK23-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK23-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK23-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK23-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK23-NEXT: store i8 [[TMP4]], ptr [[I4]], align 1 -// CHECK23-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK23-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CONV5:%.*]] = sext i8 [[TMP5]] to i32 // CHECK23-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV5]], 10 // CHECK23-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK23: omp.precond.then: // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK23-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: store i32 
[[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP1]], align 1 +// CHECK23-NEXT: [[TMP14:%.*]] = load i8, ptr [[TMP1]], align 1, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP14]], 0 // CHECK23-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK23: omp_if.then: // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !23 -// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !23 +// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] // CHECK23-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !23 +// CHECK23-NEXT: [[TMP17:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CONV9:%.*]] = sext i8 [[TMP17]] to i32 -// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !23 +// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK23-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[MUL]] // CHECK23-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i8 -// CHECK23-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal !17, !llvm.access.group !23 +// CHECK23-NEXT: store i8 [[CONV11]], ptr [[I6]], align 1, !nontemporal !18, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !23 +// CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK23-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !23 -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_IF_END:%.*]] // CHECK23: omp_if.else: // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]] // CHECK23: omp.inner.for.cond13: -// CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CMP14:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] // CHECK23-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END23:%.*]] // CHECK23: omp.inner.for.body15: -// 
CHECK23-NEXT: [[TMP22:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK23-NEXT: [[TMP22:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CONV16:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[MUL17:%.*]] = mul nsw i32 [[TMP23]], 1 // CHECK23-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV16]], [[MUL17]] // CHECK23-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8 @@ -7593,25 +7593,25 @@ // CHECK23: omp.body.continue20: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC21:%.*]] // CHECK23: omp.inner.for.inc21: -// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ADD22:%.*]] = add nsw i32 [[TMP24]], 1 // CHECK23-NEXT: store i32 [[ADD22]], ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP26:![0-9]+]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP27:![0-9]+]] // CHECK23: omp.inner.for.end23: // CHECK23-NEXT: br label [[OMP_IF_END]] // CHECK23: omp_if.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK23-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK23-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK23-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: -// CHECK23-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK23-NEXT: [[TMP29:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CONV24:%.*]] = sext i8 [[TMP29]] to i32 -// CHECK23-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK23-NEXT: [[TMP30:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CONV25:%.*]] = sext i8 [[TMP30]] to i32 // CHECK23-NEXT: [[SUB26:%.*]] = sub i32 10, [[CONV25]] // CHECK23-NEXT: [[SUB27:%.*]] = sub i32 [[SUB26]], 1 @@ -7658,65 +7658,65 @@ // CHECK23-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2 +// CHECK23-NEXT: [[TMP1:%.*]] = load i16, ptr [[TMP0]], align 2, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK23-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[CONV]]) // CHECK23-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK23: omp.dispatch.cond: -// 
CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK23: omp.dispatch.body: // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !28 -// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !28 +// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK23-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !28 +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !28 +// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !28 +// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK23-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !28 -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] 
// CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK23: omp.dispatch.inc: -// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] // CHECK23-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_LB]], align 4 -// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] // CHECK23-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_UB]], align 4 // CHECK23-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK23: omp.dispatch.end: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK23-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK23-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: diff --git a/clang/test/OpenMP/distribute_simd_private_codegen.cpp b/clang/test/OpenMP/distribute_simd_private_codegen.cpp --- a/clang/test/OpenMP/distribute_simd_private_codegen.cpp +++ b/clang/test/OpenMP/distribute_simd_private_codegen.cpp @@ -163,60 +163,60 @@ // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store ptr [[G1]], ptr [[_TMP2]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF4:![0-9]+]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// 
CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: store double 1.000000e+00, ptr [[G]], align 8, !llvm.access.group !4 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[_TMP2]], align 8, !llvm.access.group !4 -// CHECK1-NEXT: store volatile double 1.000000e+00, ptr [[TMP8]], align 8, !llvm.access.group !4 -// CHECK1-NEXT: store i32 3, ptr [[SVAR]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: store float 4.000000e+00, ptr [[SFVAR]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: store double 1.000000e+00, ptr [[G]], align 8, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[_TMP2]], align 8, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: store volatile double 1.000000e+00, ptr [[TMP8]], align 8, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: store i32 3, ptr [[SVAR]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: store float 4.000000e+00, ptr [[SFVAR]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[G]], ptr [[TMP9]], align 8, !llvm.access.group !4 +// CHECK1-NEXT: store ptr [[G]], ptr [[TMP9]], align 8, !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[_TMP2]], align 8, !llvm.access.group !4 -// CHECK1-NEXT: store ptr [[TMP11]], ptr [[TMP10]], align 8, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[_TMP2]], align 8, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: store ptr [[TMP11]], ptr [[TMP10]], align 8, !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[SVAR]], ptr [[TMP12]], align 8, !llvm.access.group !4 +// CHECK1-NEXT: store ptr [[SVAR]], ptr [[TMP12]], align 8, !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[SFVAR]], ptr [[TMP13]], align 8, !llvm.access.group !4 -// CHECK1-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group !4 +// CHECK1-NEXT: store ptr [[SFVAR]], ptr [[TMP13]], align 8, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(32) [[REF_TMP]]), !llvm.access.group [[ACC_GRP5]] 
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1 -// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -281,60 +281,60 @@ // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: store ptr [[G1]], ptr [[_TMP2]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label 
[[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: store double 1.000000e+00, ptr [[G]], align 8, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: store volatile double 1.000000e+00, ptr [[TMP8]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: store i32 3, ptr [[SVAR]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: store float 4.000000e+00, ptr [[SFVAR]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: store double 1.000000e+00, ptr [[G]], align 8, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: store volatile double 1.000000e+00, ptr [[TMP8]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: store i32 3, ptr [[SVAR]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: store float 4.000000e+00, ptr [[SFVAR]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 0 -// CHECK3-NEXT: store ptr [[G]], ptr [[TMP9]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: store ptr [[G]], ptr [[TMP9]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: store ptr [[TMP11]], ptr [[TMP10]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: store ptr [[TMP11]], ptr [[TMP10]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[SVAR]], ptr [[TMP12]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: store ptr [[SVAR]], ptr [[TMP12]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[SFVAR]], ptr [[TMP13]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group !5 +// CHECK3-NEXT: store ptr [[SFVAR]], ptr [[TMP13]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 4 dereferenceable(16) [[REF_TMP]]), !llvm.access.group [[ACC_GRP6]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK3-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -408,83 +408,83 @@ // CHECK9-NEXT: store ptr [[TEST]], ptr [[VAR]], align 8 // CHECK9-NEXT: store ptr undef, ptr [[_TMP1]], align 8 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 1, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP2]], align 4 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 1, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 // CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 // CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 // CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 // CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 // CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 2, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l93.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 -// CHECK9-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 2, ptr [[TMP8]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l93.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 +// CHECK9-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l93() #[[ATTR4:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4 -// CHECK9-NEXT: store i32 [[TMP12]], ptr [[I_CASTED]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = load i64, ptr [[I_CASTED]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: store i64 [[TMP13]], ptr [[TMP14]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store i64 [[TMP13]], ptr [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store ptr null, ptr [[TMP18]], align 8 -// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF6:![0-9]+]] +// CHECK9-NEXT: store i32 [[TMP11]], ptr [[I_CASTED]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[I_CASTED]], align 8, !noundef [[NOUNDEF6]] +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: store i64 [[TMP12]], ptr [[TMP13]], align 8 +// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: store i64 [[TMP12]], ptr [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store ptr null, ptr [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[KERNEL_ARGS3:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 0 -// CHECK9-NEXT: store i32 1, ptr [[TMP21]], align 4 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 1 -// CHECK9-NEXT: store i32 1, ptr [[TMP22]], align 4 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP20]], ptr [[TMP24]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 4 -// CHECK9-NEXT: store ptr @.offload_sizes, ptr [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP26]], align 8 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 8 -// CHECK9-NEXT: store i64 2, ptr [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS3]]) -// CHECK9-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0 -// CHECK9-NEXT: br i1 [[TMP31]], label [[OMP_OFFLOAD_FAILED4:%.*]], label [[OMP_OFFLOAD_CONT5:%.*]] +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 0 +// CHECK9-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 1 +// CHECK9-NEXT: store i32 1, ptr [[TMP19]], align 4 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 2 +// CHECK9-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 3 +// CHECK9-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 4 +// CHECK9-NEXT: store ptr @.offload_sizes, ptr [[TMP22]], align 8 +// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 5 +// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP24]], align 8 +// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP25]], align 8 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 8 +// CHECK9-NEXT: store i64 2, ptr [[TMP26]], align 8 +// CHECK9-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS3]]) +// CHECK9-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK9-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED4:%.*]], label [[OMP_OFFLOAD_CONT5:%.*]] // CHECK9: omp_offload.failed4: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP13]]) #[[ATTR4]] +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP12]]) #[[ATTR4]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT5]] // CHECK9: omp_offload.cont5: // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v() // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN]], i64 2 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN]], i64 2 // CHECK9-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK9: arraydestroy.body: -// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP32]], [[OMP_OFFLOAD_CONT5]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP29]], [[OMP_OFFLOAD_CONT5]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK9-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 // CHECK9-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK9-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] // CHECK9-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE6:%.*]], label [[ARRAYDESTROY_BODY]] // CHECK9: arraydestroy.done6: // CHECK9-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP33]] +// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP30]] // // // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev @@ -505,7 +505,7 @@ // CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK9-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: call void @_ZN1SIfEC2Ef(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]]) // CHECK9-NEXT: ret void // @@ -556,71 +556,71 @@ // CHECK9-NEXT: call void @_ZN1SIfEC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) // CHECK9-NEXT: store ptr [[VAR]], ptr [[_TMP2]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt 
i32 [[TMP2]], 1 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !6 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK9: omp.inner.for.cond.cleanup: // CHECK9-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !6 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group !6 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC]], i64 0, i64 [[IDXPROM]] -// CHECK9-NEXT: store i32 [[TMP8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !6 -// CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP2]], align 8, !llvm.access.group !6 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 +// CHECK9-NEXT: store i32 [[TMP8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP2]], align 8, !llvm.access.group [[ACC_GRP7]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP11]] to i64 // CHECK9-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i64 0, i64 [[IDXPROM4]] -// CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX5]], ptr align 4 [[TMP10]], i64 4, i1 false), !llvm.access.group !6 +// 
CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX5]], ptr align 4 [[TMP10]], i64 4, i1 false), !llvm.access.group [[ACC_GRP7]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP14]], 1 -// CHECK9-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1 +// CHECK9-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: -// CHECK9-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP16]]) -// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 -// CHECK9-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] +// CHECK9-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4, !noundef [[NOUNDEF6]] +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP14]]) +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] +// CHECK9-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 +// CHECK9-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: // CHECK9-NEXT: store i32 2, ptr [[I]], align 4 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK9: .omp.final.done: // CHECK9-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]] // CHECK9-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN7]], i64 2 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN7]], i64 2 // CHECK9-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK9: arraydestroy.body: -// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP19]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP17]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK9-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 // CHECK9-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK9-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN7]] @@ -670,45 +670,45 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr 
[[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !12 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I1]], align 4, !llvm.access.group !12 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I1]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK9-NEXT: 
[[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK9-NEXT: br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -739,45 +739,45 @@ // CHECK9-NEXT: store ptr [[TEST]], ptr [[VAR]], align 8 // CHECK9-NEXT: store ptr undef, ptr [[_TMP1]], align 8 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 1, ptr [[TMP1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 0, ptr [[TMP2]], align 4 -// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 1, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 // CHECK9-NEXT: store ptr null, ptr [[TMP3]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 // CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 // CHECK9-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 // CHECK9-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 // CHECK9-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP8]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 2, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP11:%.*]] = icmp ne 
i32 [[TMP10]], 0 -// CHECK9-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 2, ptr [[TMP8]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 +// CHECK9-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49() #[[ATTR4]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: // CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK9-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN]], i64 2 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN]], i64 2 // CHECK9-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK9: arraydestroy.body: -// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP12]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK9-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 // CHECK9-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK9-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] // CHECK9-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]] // CHECK9: arraydestroy.done2: // CHECK9-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP13]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP12]] // // // CHECK9-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev @@ -800,7 +800,7 @@ // CHECK9-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK9-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: store float [[TMP0]], ptr [[F]], align 4 // CHECK9-NEXT: ret void // @@ -832,7 +832,7 @@ // CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK9-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: call void @_ZN1SIiEC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef signext [[TMP0]]) // CHECK9-NEXT: ret void // @@ -882,71 +882,71 @@ // CHECK9-NEXT: call void @_ZN1SIiEC1Ev(ptr noundef nonnull align 4 
dereferenceable(4) [[VAR]]) // CHECK9-NEXT: store ptr [[VAR]], ptr [[_TMP2]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !15 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK9: omp.inner.for.cond.cleanup: // CHECK9-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !15 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group !15 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !15 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC]], i64 0, i64 [[IDXPROM]] -// CHECK9-NEXT: store i32 [[TMP8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !15 -// CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP2]], align 8, !llvm.access.group !15 
-// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !15 +// CHECK9-NEXT: store i32 [[TMP8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP2]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK9-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP11]] to i64 // CHECK9-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i64 0, i64 [[IDXPROM4]] -// CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX5]], ptr align 4 [[TMP10]], i64 4, i1 false), !llvm.access.group !15 +// CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX5]], ptr align 4 [[TMP10]], i64 4, i1 false), !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP14]], 1 -// CHECK9-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] +// CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1 +// CHECK9-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: -// CHECK9-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4 -// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP16]]) -// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 -// CHECK9-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] +// CHECK9-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4, !noundef [[NOUNDEF6]] +// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP14]]) +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] +// CHECK9-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 +// CHECK9-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: // CHECK9-NEXT: store i32 2, ptr [[I]], align 4 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK9: .omp.final.done: // CHECK9-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]] // CHECK9-NEXT: [[ARRAY_BEGIN7:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN7]], i64 2 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN7]], i64 2 // CHECK9-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK9: arraydestroy.body: -// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP19]], [[DOTOMP_FINAL_DONE]] ], [ 
[[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK9-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP17]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK9-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 // CHECK9-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK9-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN7]] @@ -985,7 +985,7 @@ // CHECK9-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK9-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[F]], align 4 // CHECK9-NEXT: ret void // @@ -1037,83 +1037,83 @@ // CHECK11-NEXT: store ptr [[TEST]], ptr [[VAR]], align 4 // CHECK11-NEXT: store ptr undef, ptr [[_TMP1]], align 4 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 1, ptr [[TMP1]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 0, ptr [[TMP2]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 1, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 // CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 // CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 // CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 // CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr 
inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 // CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP8]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 2, ptr [[TMP9]], align 8 -// CHECK11-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l93.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 -// CHECK11-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 2, ptr [[TMP8]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l93.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 +// CHECK11-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l93() #[[ATTR4:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4 -// CHECK11-NEXT: store i32 [[TMP12]], ptr [[I_CASTED]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[I_CASTED]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 [[TMP13]], ptr [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 [[TMP13]], ptr [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store ptr null, ptr [[TMP18]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF7:![0-9]+]] +// CHECK11-NEXT: store i32 [[TMP11]], ptr [[I_CASTED]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[I_CASTED]], align 4, !noundef [[NOUNDEF7]] +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 [[TMP12]], ptr [[TMP13]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 [[TMP12]], ptr [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store ptr null, ptr [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[KERNEL_ARGS3:%.*]] = alloca 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 0 -// CHECK11-NEXT: store i32 1, ptr [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 1 -// CHECK11-NEXT: store i32 1, ptr [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 2 -// CHECK11-NEXT: store ptr [[TMP19]], ptr [[TMP23]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 3 -// CHECK11-NEXT: store ptr [[TMP20]], ptr [[TMP24]], align 4 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 4 -// CHECK11-NEXT: store ptr @.offload_sizes, ptr [[TMP25]], align 4 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 5 -// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP28]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 8 -// CHECK11-NEXT: store i64 2, ptr [[TMP29]], align 8 -// CHECK11-NEXT: [[TMP30:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS3]]) -// CHECK11-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0 -// CHECK11-NEXT: br i1 [[TMP31]], label [[OMP_OFFLOAD_FAILED4:%.*]], label [[OMP_OFFLOAD_CONT5:%.*]] +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 0 +// CHECK11-NEXT: store i32 1, ptr [[TMP18]], align 4 +// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 1 +// CHECK11-NEXT: store i32 1, ptr [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 2 +// CHECK11-NEXT: store ptr [[TMP16]], ptr [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 3 +// CHECK11-NEXT: store ptr [[TMP17]], ptr [[TMP21]], align 4 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 4 +// CHECK11-NEXT: store ptr @.offload_sizes, ptr [[TMP22]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 5 +// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP24]], align 4 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 7 +// CHECK11-NEXT: store ptr 
null, ptr [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS3]], i32 0, i32 8 +// CHECK11-NEXT: store i64 2, ptr [[TMP26]], align 8 +// CHECK11-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, ptr [[KERNEL_ARGS3]]) +// CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK11-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED4:%.*]], label [[OMP_OFFLOAD_CONT5:%.*]] // CHECK11: omp_offload.failed4: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i32 [[TMP13]]) #[[ATTR4]] +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i32 [[TMP12]]) #[[ATTR4]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT5]] // CHECK11: omp_offload.cont5: // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v() // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN]], i32 2 +// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN]], i32 2 // CHECK11-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK11: arraydestroy.body: -// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP32]], [[OMP_OFFLOAD_CONT5]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP29]], [[OMP_OFFLOAD_CONT5]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK11-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i32 -1 // CHECK11-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK11-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] // CHECK11-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE6:%.*]], label [[ARRAYDESTROY_BODY]] // CHECK11: arraydestroy.done6: // CHECK11-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP33]] +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP30]] // // // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev @@ -1134,7 +1134,7 @@ // CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK11-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: call void @_ZN1SIfEC2Ef(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]]) // CHECK11-NEXT: ret void // @@ -1185,69 +1185,69 @@ // CHECK11-NEXT: call void @_ZN1SIfEC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) // CHECK11-NEXT: store ptr [[VAR]], ptr [[_TMP2]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF7]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK11: omp.inner.for.cond.cleanup: // CHECK11-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !7 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group !7 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !7 +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF7]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC]], i32 0, i32 [[TMP9]] -// CHECK11-NEXT: store i32 [[TMP8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !7 -// CHECK11-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group !7 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !7 +// CHECK11-NEXT: store i32 [[TMP8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK11-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group 
[[ACC_GRP8]], !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 0, i32 [[TMP11]] -// CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX4]], ptr align 4 [[TMP10]], i32 4, i1 false), !llvm.access.group !7 +// CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX4]], ptr align 4 [[TMP10]], i32 4, i1 false), !llvm.access.group [[ACC_GRP8]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK11-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], 1 -// CHECK11-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF7]] +// CHECK11-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], 1 +// CHECK11-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: -// CHECK11-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP16]]) -// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 -// CHECK11-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] +// CHECK11-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4, !noundef [[NOUNDEF7]] +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP14]]) +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF7]] +// CHECK11-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 +// CHECK11-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: // CHECK11-NEXT: store i32 2, ptr [[I]], align 4 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK11: .omp.final.done: // CHECK11-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]] // CHECK11-NEXT: [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN6]], i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN6]], i32 2 // CHECK11-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK11: arraydestroy.body: -// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP19]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP17]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK11-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i32 -1 // CHECK11-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) 
[[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK11-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN6]] @@ -1297,45 +1297,45 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !13 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF7]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I1]], align 4, !llvm.access.group !13 +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I1]], align 4, !llvm.access.group [[ACC_GRP14]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !13 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] +// 
CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK11-NEXT: br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -1366,45 +1366,45 @@ // CHECK11-NEXT: store ptr [[TEST]], ptr [[VAR]], align 4 // CHECK11-NEXT: store ptr undef, ptr [[_TMP1]], align 4 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 1, ptr [[TMP1]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 0, ptr [[TMP2]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 1, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 0, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 // CHECK11-NEXT: store ptr null, ptr [[TMP3]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 // CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 // CHECK11-NEXT: store ptr null, ptr [[TMP5]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 // CHECK11-NEXT: store ptr null, ptr [[TMP6]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 // CHECK11-NEXT: store ptr null, ptr [[TMP7]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP8]], align 4 
-// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 2, ptr [[TMP9]], align 8 -// CHECK11-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 -// CHECK11-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 2, ptr [[TMP8]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 +// CHECK11-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49() #[[ATTR4]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: // CHECK11-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK11-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN]], i32 2 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN]], i32 2 // CHECK11-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK11: arraydestroy.body: -// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP12]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[OMP_OFFLOAD_CONT]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK11-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i32 -1 // CHECK11-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK11-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] // CHECK11-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]] // CHECK11: arraydestroy.done2: // CHECK11-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP13]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP12]] // // // CHECK11-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev @@ -1427,7 +1427,7 @@ // CHECK11-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 // CHECK11-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: store float [[TMP0]], ptr [[F]], align 4 // CHECK11-NEXT: ret void // @@ -1459,7 +1459,7 @@ // CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: 
[[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: call void @_ZN1SIiEC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK11-NEXT: ret void // @@ -1509,69 +1509,69 @@ // CHECK11-NEXT: call void @_ZN1SIiEC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) // CHECK11-NEXT: store ptr [[VAR]], ptr [[_TMP2]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 1 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !16 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !16 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]], !noundef [[NOUNDEF7]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK11: omp.inner.for.cond.cleanup: // CHECK11-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !16 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !16 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group !16 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !16 +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4, !llvm.access.group 
[[ACC_GRP17]], !noundef [[NOUNDEF7]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC]], i32 0, i32 [[TMP9]] -// CHECK11-NEXT: store i32 [[TMP8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !16 -// CHECK11-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group !16 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !16 +// CHECK11-NEXT: store i32 [[TMP8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK11-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP2]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF7]] // CHECK11-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i32 0, i32 [[TMP11]] -// CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX4]], ptr align 4 [[TMP10]], i32 4, i1 false), !llvm.access.group !16 +// CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX4]], ptr align 4 [[TMP10]], i32 4, i1 false), !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !16 -// CHECK11-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], 1 -// CHECK11-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !16 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF7]] +// CHECK11-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], 1 +// CHECK11-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: -// CHECK11-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4 -// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP16]]) -// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 -// CHECK11-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] +// CHECK11-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4, !noundef [[NOUNDEF7]] +// CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP14]]) +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF7]] +// CHECK11-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 +// CHECK11-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: // CHECK11-NEXT: store i32 2, ptr [[I]], align 4 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK11: .omp.final.done: // CHECK11-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]] // CHECK11-NEXT: [[ARRAY_BEGIN6:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], 
i32 0, i32 0 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN6]], i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN6]], i32 2 // CHECK11-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK11: arraydestroy.body: -// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP19]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK11-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP17]], [[DOTOMP_FINAL_DONE]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK11-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i32 -1 // CHECK11-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK11-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN6]] @@ -1610,7 +1610,7 @@ // CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 // CHECK11-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF7]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[F]], align 4 // CHECK11-NEXT: ret void // @@ -1673,8 +1673,8 @@ // CHECK13-NEXT: store ptr undef, ptr [[_TMP1]], align 8 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK13-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] +// CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR4]], i32 0, i32 0 // CHECK13-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN]], i64 2 // CHECK13-NEXT: br label [[ARRAYCTOR_LOOP:%.*]] @@ -1689,43 +1689,43 @@ // CHECK13-NEXT: store ptr [[VAR5]], ptr [[_TMP6]], align 8 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK13: omp.inner.for.cond.cleanup: // CHECK13-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 
0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC3]], i64 0, i64 [[IDXPROM]] -// CHECK13-NEXT: store i32 [[TMP5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[_TMP6]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP8]] to i64 +// CHECK13-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP6:%.*]] = load ptr, ptr [[_TMP6]], align 8, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR4]], i64 0, i64 [[IDXPROM7]] -// CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX8]], ptr align 4 [[TMP7]], i64 4, i1 false), !llvm.access.group !2 +// CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX8]], ptr align 4 [[TMP6]], i64 4, i1 false), !llvm.access.group [[ACC_GRP3]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP8]], 1 +// CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 2, ptr [[I]], align 4 // CHECK13-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4:[0-9]+]] // CHECK13-NEXT: [[ARRAY_BEGIN10:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR4]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN10]], i64 2 +// CHECK13-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN10]], i64 2 // CHECK13-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK13: arraydestroy.body: -// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP12]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK13-NEXT: 
[[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK13-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 // CHECK13-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK13-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN10]] @@ -1733,44 +1733,44 @@ // CHECK13: arraydestroy.done11: // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB14]], align 4 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_UB15]], align 4 -// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB14]], align 4 -// CHECK13-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV16]], align 4 +// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB14]], align 4, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV16]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND18:%.*]] // CHECK13: omp.inner.for.cond18: -// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB15]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[CMP19:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB15]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[CMP19:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] // CHECK13-NEXT: br i1 [[CMP19]], label [[OMP_INNER_FOR_BODY20:%.*]], label [[OMP_INNER_FOR_END26:%.*]] // CHECK13: omp.inner.for.body20: -// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[MUL21:%.*]] = mul nsw i32 [[TMP16]], 1 +// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[MUL21:%.*]] = mul nsw i32 [[TMP13]], 1 // CHECK13-NEXT: [[ADD22:%.*]] = add nsw i32 0, [[MUL21]] -// CHECK13-NEXT: store i32 [[ADD22]], ptr [[I17]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: store i32 [[ADD22]], ptr [[I17]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE23:%.*]] // CHECK13: omp.body.continue23: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC24:%.*]] // CHECK13: omp.inner.for.inc24: -// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP17]], 1 -// CHECK13-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND18]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP14]], 1 +// CHECK13-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND18]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK13: omp.inner.for.end26: // CHECK13-NEXT: store i32 2, ptr [[I12]], align 4 // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v() // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[ARRAY_BEGIN27:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 
0, i32 0 -// CHECK13-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN27]], i64 2 +// CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN27]], i64 2 // CHECK13-NEXT: br label [[ARRAYDESTROY_BODY28:%.*]] // CHECK13: arraydestroy.body28: -// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST29:%.*]] = phi ptr [ [[TMP18]], [[OMP_INNER_FOR_END26]] ], [ [[ARRAYDESTROY_ELEMENT30:%.*]], [[ARRAYDESTROY_BODY28]] ] +// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST29:%.*]] = phi ptr [ [[TMP15]], [[OMP_INNER_FOR_END26]] ], [ [[ARRAYDESTROY_ELEMENT30:%.*]], [[ARRAYDESTROY_BODY28]] ] // CHECK13-NEXT: [[ARRAYDESTROY_ELEMENT30]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST29]], i64 -1 // CHECK13-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT30]]) #[[ATTR4]] // CHECK13-NEXT: [[ARRAYDESTROY_DONE31:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT30]], [[ARRAY_BEGIN27]] // CHECK13-NEXT: br i1 [[ARRAYDESTROY_DONE31]], label [[ARRAYDESTROY_DONE32:%.*]], label [[ARRAYDESTROY_BODY28]] // CHECK13: arraydestroy.done32: // CHECK13-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK13-NEXT: ret i32 [[TMP19]] +// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK13-NEXT: ret i32 [[TMP16]] // // // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev @@ -1791,7 +1791,7 @@ // CHECK13-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK13-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK13-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: call void @_ZN1SIfEC2Ef(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]]) // CHECK13-NEXT: ret void // @@ -1837,8 +1837,8 @@ // CHECK13-NEXT: store ptr undef, ptr [[_TMP1]], align 8 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK13-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR4]], i32 0, i32 0 // CHECK13-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN]], i64 2 // CHECK13-NEXT: br label [[ARRAYCTOR_LOOP:%.*]] @@ -1853,43 +1853,43 @@ // CHECK13-NEXT: store ptr [[VAR5]], ptr [[_TMP6]], align 8 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 -// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], 
label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK13: omp.inner.for.cond.cleanup: // CHECK13-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group !9 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 -// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC3]], i64 0, i64 [[IDXPROM]] -// CHECK13-NEXT: store i32 [[TMP5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !9 -// CHECK13-NEXT: [[TMP7:%.*]] = load ptr, ptr [[_TMP6]], align 8, !llvm.access.group !9 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 -// CHECK13-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP8]] to i64 +// CHECK13-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: [[TMP6:%.*]] = load ptr, ptr [[_TMP6]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR4]], i64 0, i64 [[IDXPROM7]] -// CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX8]], ptr align 4 [[TMP7]], i64 4, i1 false), !llvm.access.group !9 +// CHECK13-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX8]], ptr align 4 [[TMP6]], i64 4, i1 false), !llvm.access.group [[ACC_GRP10]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP8]], 1 +// CHECK13-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 2, ptr [[I]], align 4 // CHECK13-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]] 
// CHECK13-NEXT: [[ARRAY_BEGIN10:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR4]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN10]], i64 2 +// CHECK13-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN10]], i64 2 // CHECK13-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK13: arraydestroy.body: -// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP12]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK13-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 // CHECK13-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK13-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN10]] @@ -1897,18 +1897,18 @@ // CHECK13: arraydestroy.done11: // CHECK13-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[ARRAY_BEGIN12:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN12]], i64 2 +// CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN12]], i64 2 // CHECK13-NEXT: br label [[ARRAYDESTROY_BODY13:%.*]] // CHECK13: arraydestroy.body13: -// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST14:%.*]] = phi ptr [ [[TMP13]], [[ARRAYDESTROY_DONE11]] ], [ [[ARRAYDESTROY_ELEMENT15:%.*]], [[ARRAYDESTROY_BODY13]] ] +// CHECK13-NEXT: [[ARRAYDESTROY_ELEMENTPAST14:%.*]] = phi ptr [ [[TMP10]], [[ARRAYDESTROY_DONE11]] ], [ [[ARRAYDESTROY_ELEMENT15:%.*]], [[ARRAYDESTROY_BODY13]] ] // CHECK13-NEXT: [[ARRAYDESTROY_ELEMENT15]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST14]], i64 -1 // CHECK13-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT15]]) #[[ATTR4]] // CHECK13-NEXT: [[ARRAYDESTROY_DONE16:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT15]], [[ARRAY_BEGIN12]] // CHECK13-NEXT: br i1 [[ARRAYDESTROY_DONE16]], label [[ARRAYDESTROY_DONE17:%.*]], label [[ARRAYDESTROY_BODY13]] // CHECK13: arraydestroy.done17: // CHECK13-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK13-NEXT: ret i32 [[TMP14]] +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK13-NEXT: ret i32 [[TMP11]] // // // CHECK13-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev @@ -1940,7 +1940,7 @@ // CHECK13-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK13-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store float [[TMP0]], ptr [[F]], align 4 // CHECK13-NEXT: ret void // @@ -1963,7 +1963,7 @@ // CHECK13-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK13-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 
+// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: call void @_ZN1SIiEC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef signext [[TMP0]]) // CHECK13-NEXT: ret void // @@ -1998,7 +1998,7 @@ // CHECK13-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK13-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP0]], ptr [[F]], align 4 // CHECK13-NEXT: ret void // @@ -2054,8 +2054,8 @@ // CHECK15-NEXT: store ptr undef, ptr [[_TMP1]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK15-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] +// CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR4]], i32 0, i32 0 // CHECK15-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN]], i32 2 // CHECK15-NEXT: br label [[ARRAYCTOR_LOOP:%.*]] @@ -2070,41 +2070,41 @@ // CHECK15-NEXT: store ptr [[VAR5]], ptr [[_TMP6]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK15: omp.inner.for.cond.cleanup: // CHECK15-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC3]], i32 0, i32 [[TMP6]] -// CHECK15-NEXT: store i32 [[TMP5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP7:%.*]] = load ptr, ptr [[_TMP6]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S], 
ptr [[S_ARR4]], i32 0, i32 [[TMP8]] -// CHECK15-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX7]], ptr align 4 [[TMP7]], i32 4, i1 false), !llvm.access.group !3 +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC3]], i32 0, i32 [[TMP5]] +// CHECK15-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: [[TMP6:%.*]] = load ptr, ptr [[_TMP6]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR4]], i32 0, i32 [[TMP7]] +// CHECK15-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX7]], ptr align 4 [[TMP6]], i32 4, i1 false), !llvm.access.group [[ACC_GRP4]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK15-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1 +// CHECK15-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 2, ptr [[I]], align 4 // CHECK15-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4:[0-9]+]] // CHECK15-NEXT: [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR4]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN9]], i32 2 +// CHECK15-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN9]], i32 2 // CHECK15-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK15: arraydestroy.body: -// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP12]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK15-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i32 -1 // CHECK15-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK15-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN9]] @@ -2112,44 +2112,44 @@ // CHECK15: arraydestroy.done10: // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB13]], align 4 // CHECK15-NEXT: store i32 1, ptr [[DOTOMP_UB14]], align 4 -// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB13]], align 4 -// CHECK15-NEXT: store i32 
[[TMP13]], ptr [[DOTOMP_IV15]], align 4 +// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB13]], align 4, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV15]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND17:%.*]] // CHECK15: omp.inner.for.cond17: -// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV15]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB14]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[CMP18:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] +// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV15]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB14]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[CMP18:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] // CHECK15-NEXT: br i1 [[CMP18]], label [[OMP_INNER_FOR_BODY19:%.*]], label [[OMP_INNER_FOR_END25:%.*]] // CHECK15: omp.inner.for.body19: -// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV15]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[MUL20:%.*]] = mul nsw i32 [[TMP16]], 1 +// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV15]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[MUL20:%.*]] = mul nsw i32 [[TMP13]], 1 // CHECK15-NEXT: [[ADD21:%.*]] = add nsw i32 0, [[MUL20]] -// CHECK15-NEXT: store i32 [[ADD21]], ptr [[I16]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: store i32 [[ADD21]], ptr [[I16]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE22:%.*]] // CHECK15: omp.body.continue22: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC23:%.*]] // CHECK15: omp.inner.for.inc23: -// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV15]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP17]], 1 -// CHECK15-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV15]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND17]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV15]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP14]], 1 +// CHECK15-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV15]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND17]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK15: omp.inner.for.end25: // CHECK15-NEXT: store i32 2, ptr [[I11]], align 4 // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v() // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[ARRAY_BEGIN26:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN26]], i32 2 +// CHECK15-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN26]], i32 2 // CHECK15-NEXT: br label [[ARRAYDESTROY_BODY27:%.*]] // CHECK15: arraydestroy.body27: -// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST28:%.*]] = phi ptr [ [[TMP18]], [[OMP_INNER_FOR_END25]] ], [ [[ARRAYDESTROY_ELEMENT29:%.*]], [[ARRAYDESTROY_BODY27]] ] +// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST28:%.*]] = phi ptr [ [[TMP15]], [[OMP_INNER_FOR_END25]] ], [ [[ARRAYDESTROY_ELEMENT29:%.*]], [[ARRAYDESTROY_BODY27]] ] // CHECK15-NEXT: [[ARRAYDESTROY_ELEMENT29]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAYDESTROY_ELEMENTPAST28]], i32 -1 // CHECK15-NEXT: 
call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT29]]) #[[ATTR4]] // CHECK15-NEXT: [[ARRAYDESTROY_DONE30:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT29]], [[ARRAY_BEGIN26]] // CHECK15-NEXT: br i1 [[ARRAYDESTROY_DONE30]], label [[ARRAYDESTROY_DONE31:%.*]], label [[ARRAYDESTROY_BODY27]] // CHECK15: arraydestroy.done31: // CHECK15-NEXT: call void @_ZN1SIfED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK15-NEXT: ret i32 [[TMP19]] +// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK15-NEXT: ret i32 [[TMP16]] // // // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev @@ -2170,7 +2170,7 @@ // CHECK15-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK15-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK15-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: call void @_ZN1SIfEC2Ef(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], float noundef [[TMP0]]) // CHECK15-NEXT: ret void // @@ -2216,8 +2216,8 @@ // CHECK15-NEXT: store ptr undef, ptr [[_TMP1]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK15-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR4]], i32 0, i32 0 // CHECK15-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN]], i32 2 // CHECK15-NEXT: br label [[ARRAYCTOR_LOOP:%.*]] @@ -2232,41 +2232,41 @@ // CHECK15-NEXT: store ptr [[VAR5]], ptr [[_TMP6]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !10 -// CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK15: omp.inner.for.cond.cleanup: // CHECK15-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !10 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group !10 -// 
CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 -// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC3]], i32 0, i32 [[TMP6]] -// CHECK15-NEXT: store i32 [[TMP5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group !10 -// CHECK15-NEXT: [[TMP7:%.*]] = load ptr, ptr [[_TMP6]], align 4, !llvm.access.group !10 -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 -// CHECK15-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR4]], i32 0, i32 [[TMP8]] -// CHECK15-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX7]], ptr align 4 [[TMP7]], i32 4, i1 false), !llvm.access.group !10 +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC3]], i32 0, i32 [[TMP5]] +// CHECK15-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: [[TMP6:%.*]] = load ptr, ptr [[_TMP6]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR4]], i32 0, i32 [[TMP7]] +// CHECK15-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[ARRAYIDX7]], ptr align 4 [[TMP6]], i32 4, i1 false), !llvm.access.group [[ACC_GRP11]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK15-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK15-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1 +// CHECK15-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 2, ptr [[I]], align 4 // CHECK15-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR5]]) #[[ATTR4]] // CHECK15-NEXT: [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR4]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN9]], i32 2 +// CHECK15-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN9]], i32 2 // CHECK15-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] // CHECK15: arraydestroy.body: -// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP12]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] +// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] // CHECK15-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds 
[[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i32 -1 // CHECK15-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]] // CHECK15-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN9]] @@ -2274,18 +2274,18 @@ // CHECK15: arraydestroy.done10: // CHECK15-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[ARRAY_BEGIN11:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN11]], i32 2 +// CHECK15-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAY_BEGIN11]], i32 2 // CHECK15-NEXT: br label [[ARRAYDESTROY_BODY12:%.*]] // CHECK15: arraydestroy.body12: -// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST13:%.*]] = phi ptr [ [[TMP13]], [[ARRAYDESTROY_DONE10]] ], [ [[ARRAYDESTROY_ELEMENT14:%.*]], [[ARRAYDESTROY_BODY12]] ] +// CHECK15-NEXT: [[ARRAYDESTROY_ELEMENTPAST13:%.*]] = phi ptr [ [[TMP10]], [[ARRAYDESTROY_DONE10]] ], [ [[ARRAYDESTROY_ELEMENT14:%.*]], [[ARRAYDESTROY_BODY12]] ] // CHECK15-NEXT: [[ARRAYDESTROY_ELEMENT14]] = getelementptr inbounds [[STRUCT_S_0]], ptr [[ARRAYDESTROY_ELEMENTPAST13]], i32 -1 // CHECK15-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT14]]) #[[ATTR4]] // CHECK15-NEXT: [[ARRAYDESTROY_DONE15:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT14]], [[ARRAY_BEGIN11]] // CHECK15-NEXT: br i1 [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_DONE16:%.*]], label [[ARRAYDESTROY_BODY12]] // CHECK15: arraydestroy.done16: // CHECK15-NEXT: call void @_ZN1SIiED1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]] -// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK15-NEXT: ret i32 [[TMP14]] +// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK15-NEXT: ret i32 [[TMP11]] // // // CHECK15-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev @@ -2317,7 +2317,7 @@ // CHECK15-NEXT: store float [[A]], ptr [[A_ADDR]], align 4 // CHECK15-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 // CHECK15-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load float, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store float [[TMP0]], ptr [[F]], align 4 // CHECK15-NEXT: ret void // @@ -2340,7 +2340,7 @@ // CHECK15-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK15-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK15-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: call void @_ZN1SIiEC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK15-NEXT: ret void // @@ -2375,7 +2375,7 @@ // CHECK15-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK15-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 // CHECK15-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[F]], align 4 // CHECK15-NEXT: ret void // diff --git 
a/clang/test/OpenMP/distribute_simd_reduction_codegen.cpp b/clang/test/OpenMP/distribute_simd_reduction_codegen.cpp --- a/clang/test/OpenMP/distribute_simd_reduction_codegen.cpp +++ b/clang/test/OpenMP/distribute_simd_reduction_codegen.cpp @@ -92,39 +92,39 @@ // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[SIVAR_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[SIVAR_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[SIVAR_CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 2, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK1-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 2, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64(i64 [[TMP1]]) #[[ATTR2:[0-9]+]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -166,57 +166,57 @@ // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store i32 0, ptr [[SIVAR1]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef 
[[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // 
CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: // CHECK1-NEXT: store i32 2, ptr [[I]], align 4 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK1: .omp.final.done: -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[SIVAR1]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] // CHECK1-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK1-NEXT: ret void @@ -234,41 +234,41 @@ // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store i32 0, ptr [[T_VAR]], align 4 // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i64 8, i1 false) -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK1-NEXT: store i32 [[TMP1]], ptr [[T_VAR_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[T_VAR_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP2]], ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP2]], ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[T_VAR_CASTED]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[T_VAR_CASTED]], align 8, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// 
CHECK1-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.2, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 2, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0 -// CHECK1-NEXT: br i1 [[TMP20]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.2, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 2, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr 
[[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i64 [[TMP2]]) #[[ATTR2]] +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i64 [[TMP1]]) #[[ATTR2]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK1: omp_offload.cont: // CHECK1-NEXT: ret i32 0 @@ -307,57 +307,57 @@ // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store i32 0, ptr [[T_VAR1]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !11 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], 
!noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: // CHECK1-NEXT: store i32 2, ptr [[I]], align 4 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK1: .omp.final.done: -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[T_VAR1]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] // CHECK1-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK1-NEXT: ret void @@ -380,39 +380,39 @@ // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF6:![0-9]+]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[SIVAR_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIVAR_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIVAR_CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP2]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x 
ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 2, ptr [[TMP17]], align 8 -// CHECK3-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK3-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr 
@.offload_sizes, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 2, ptr [[TMP15]], align 8 +// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64(i32 [[TMP1]]) #[[ATTR2:[0-9]+]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -454,57 +454,57 @@ // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: store i32 0, ptr [[SIVAR1]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: 
omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: // CHECK3-NEXT: store i32 2, ptr [[I]], align 4 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK3: .omp.final.done: -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[SIVAR1]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] // CHECK3-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK3-NEXT: ret void @@ -522,41 +522,41 @@ // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 0, ptr [[T_VAR]], align 4 // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i32 8, i1 false) -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[T_VAR_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[T_VAR_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = 
getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[TMP3]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[TMP5]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: store i32 [[TMP0]], ptr [[T_VAR_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[T_VAR_CASTED]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP9]], ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes.2, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 2, ptr [[TMP18]], align 8 -// CHECK3-NEXT: [[TMP19:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, 
ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0 -// CHECK3-NEXT: br i1 [[TMP20]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.2, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 2, ptr [[TMP15]], align 8 +// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i32 [[TMP2]]) #[[ATTR2]] +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i32 [[TMP1]]) #[[ATTR2]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK3: omp_offload.cont: // CHECK3-NEXT: ret i32 0 @@ -595,57 +595,57 @@ // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: store i32 0, ptr [[T_VAR1]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// 
CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !12 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !12 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !12 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !12 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group !12 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // 
CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: // CHECK3-NEXT: store i32 2, ptr [[I]], align 4 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK3: .omp.final.done: -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[T_VAR1]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] // CHECK3-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK3-NEXT: ret void @@ -671,36 +671,36 @@ // CHECK5-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: store i32 0, ptr [[SIVAR]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] -// CHECK5-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK5-NEXT: store i32 [[ADD2]], 
ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK5-NEXT: store i32 [[ADD3]], ptr @_ZZ4mainE5sivar, align 4 // CHECK5-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v() @@ -722,37 +722,37 @@ // CHECK5-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i64 8, i1 false) // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: store i32 0, ptr [[T_VAR1]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]] -// CHECK5-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group !6 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] +// CHECK5-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP7]] // 
CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP6]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK5-NEXT: store i32 [[ADD4]], ptr [[T_VAR]], align 4 // CHECK5-NEXT: ret i32 0 // @@ -770,36 +770,36 @@ // CHECK7-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: store i32 0, ptr [[SIVAR]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD1:%.*]] = add nsw 
i32 [[TMP5]], [[TMP4]] -// CHECK7-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK7-NEXT: store i32 [[ADD3]], ptr @_ZZ4mainE5sivar, align 4 // CHECK7-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v() @@ -821,37 +821,37 @@ // CHECK7-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i32 8, i1 false) // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK7-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: store i32 0, ptr [[T_VAR1]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[T_VAR1]], 
align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]] -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group !7 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1 -// CHECK7-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP6]], 1 +// CHECK7-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK7-NEXT: store i32 [[ADD4]], ptr [[T_VAR]], align 4 // CHECK7-NEXT: ret i32 0 // @@ -900,60 +900,60 @@ // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: store i32 0, ptr [[SIVAR1]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF4:![0-9]+]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], 
align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !4 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP11]], align 8, !llvm.access.group !4 -// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]]), !llvm.access.group !4 +// CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP11]], align 8, !llvm.access.group [[ACC_GRP5]] +// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]]), !llvm.access.group [[ACC_GRP5]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK9-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr 
[[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: // CHECK9-NEXT: store i32 2, ptr [[I]], align 4 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK9: .omp.final.done: -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[SIVAR1]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] // CHECK9-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK9-NEXT: ret void diff --git a/clang/test/OpenMP/for_reduction_task_codegen.cpp b/clang/test/OpenMP/for_reduction_task_codegen.cpp --- a/clang/test/OpenMP/for_reduction_task_codegen.cpp +++ b/clang/test/OpenMP/for_reduction_task_codegen.cpp @@ -83,14 +83,14 @@ // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store i32 0, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 0 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 0 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64 // CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP5]] -// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TMP1]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds ptr, ptr [[TMP6]], i64 9 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[ARRAYIDX3]], align 8 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 [[LB_ADD_LEN]] @@ -126,202 +126,202 @@ // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 0 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 -// CHECK1-NEXT: store i64 4, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr 
[[DOTRD_INPUT_GEP_]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 -// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP31]], i8 0, i64 4, i1 false) +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 +// CHECK1-NEXT: store i64 4, ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 +// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP29]], i8 0, i64 4, i1 false) // CHECK1-NEXT: [[DOTRD_INPUT_GEP_7:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP1]], align 8 -// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds ptr, ptr [[TMP34]], i64 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[ARRAYIDX8]], align 8 -// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, ptr [[TMP35]], i64 0 -// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP37:%.*]] = sext i32 [[TMP36]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP37]] -// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP1]], align 8 -// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds ptr, ptr [[TMP38]], i64 9 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[ARRAYIDX11]], align 8 -// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, ptr [[TMP39]], i64 [[LB_ADD_LEN10]] -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARRAYIDX9]], ptr [[TMP40]], align 8 -// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[ARRAYIDX12]] to i64 -// CHECK1-NEXT: [[TMP42:%.*]] = ptrtoint ptr [[ARRAYIDX9]] to i64 -// CHECK1-NEXT: [[TMP43:%.*]] = sub i64 [[TMP41]], [[TMP42]] -// CHECK1-NEXT: [[TMP44:%.*]] = sdiv exact i64 [[TMP43]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP45:%.*]] = add nuw i64 [[TMP44]], 1 -// CHECK1-NEXT: [[TMP46:%.*]] = mul nuw i64 [[TMP45]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 2 -// CHECK1-NEXT: store i64 [[TMP46]], ptr [[TMP47]], align 8 -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP48]], align 8 -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP49]], align 8 -// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP50]], align 8 -// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 6 -// CHECK1-NEXT: store i32 1, ptr [[TMP51]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds ptr, ptr [[TMP31]], i64 0 +// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARRAYIDX8]], align 8 +// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i64 0 +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP34]] +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds ptr, ptr [[TMP35]], i64 9 +// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[ARRAYIDX11]], align 8 +// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, ptr [[TMP36]], i64 [[LB_ADD_LEN10]] +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP30]], align 8 +// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARRAYIDX9]], ptr [[TMP37]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[ARRAYIDX12]] to i64 +// CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[ARRAYIDX9]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = sub i64 [[TMP38]], [[TMP39]] +// CHECK1-NEXT: [[TMP41:%.*]] = sdiv exact i64 [[TMP40]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP42:%.*]] = add nuw i64 [[TMP41]], 1 +// CHECK1-NEXT: [[TMP43:%.*]] = mul nuw i64 [[TMP42]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 2 +// CHECK1-NEXT: store i64 [[TMP43]], ptr [[TMP44]], align 8 +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 6 +// CHECK1-NEXT: store i32 1, ptr [[TMP48]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP50:%.*]] = load i32, ptr [[TMP49]], align 4, !noundef 
[[NOUNDEF3]] +// CHECK1-NEXT: [[TMP51:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP50]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) +// CHECK1-NEXT: store ptr [[TMP51]], ptr [[DOTTASK_RED_]], align 8 // CHECK1-NEXT: [[TMP52:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP53:%.*]] = load i32, ptr [[TMP52]], align 4 -// CHECK1-NEXT: [[TMP55:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP53]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) -// CHECK1-NEXT: store ptr [[TMP55]], ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: [[TMP56:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP57:%.*]] = load i32, ptr [[TMP56]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB2:[0-9]+]], i32 [[TMP57]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP58]], 9 +// CHECK1-NEXT: [[TMP53:%.*]] = load i32, ptr [[TMP52]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB2:[0-9]+]], i32 [[TMP53]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) +// CHECK1-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP54]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP59]], [[COND_FALSE]] ] +// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP55]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 -// CHECK1-NEXT: store i64 [[TMP60]], ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i64 [[TMP56]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP61:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP62:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP61]], [[TMP62]] +// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP57]], [[TMP58]] // CHECK1-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK1: omp.inner.for.cond.cleanup: // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP63:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP63]], 1 +// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP59]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL]] // CHECK1-NEXT: store i64 [[ADD]], ptr 
[[I]], align 8 -// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP64]], align 8 -// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP65]], align 8 -// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP67:%.*]] = load ptr, ptr [[_TMP5]], align 8 -// CHECK1-NEXT: store ptr [[TMP67]], ptr [[TMP66]], align 8 -// CHECK1-NEXT: [[TMP68:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP69:%.*]] = load i32, ptr [[TMP68]], align 4 -// CHECK1-NEXT: [[TMP70:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP69]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) -// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP70]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP72]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP74:%.*]] = load ptr, ptr [[TMP73]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP74]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) -// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP70]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP76]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP78:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: store ptr [[TMP78]], ptr [[TMP77]], align 8 -// CHECK1-NEXT: [[TMP79:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP80:%.*]] = load i32, ptr [[TMP79]], align 4 -// CHECK1-NEXT: [[TMP81:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP80]], ptr [[TMP70]]) +// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP60]], align 8 +// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP61]], align 8 +// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP63:%.*]] = load ptr, ptr [[_TMP5]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP63]], ptr [[TMP62]], align 8 +// CHECK1-NEXT: [[TMP64:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP65:%.*]] = load i32, ptr [[TMP64]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP66:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP65]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
+// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP66]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP67]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP69:%.*]] = load ptr, ptr [[TMP68]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP69]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) +// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP66]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP70]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP72:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP72]], ptr [[TMP71]], align 8 +// CHECK1-NEXT: [[TMP73:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP74:%.*]] = load i32, ptr [[TMP73]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP75:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP74]], ptr [[TMP66]]) // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP82:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP82]], 1 +// CHECK1-NEXT: [[TMP76:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP76]], 1 // CHECK1-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: -// CHECK1-NEXT: [[TMP83:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP84:%.*]] = load i32, ptr [[TMP83]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP84]]) +// CHECK1-NEXT: [[TMP77:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP78:%.*]] = load i32, ptr [[TMP77]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP78]]) +// CHECK1-NEXT: [[TMP79:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP80:%.*]] = load i32, ptr [[TMP79]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP80]], i32 1) +// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP81]], align 8 +// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP82]], align 8 +// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 +// CHECK1-NEXT: [[TMP84:%.*]] = inttoptr i64 [[TMP12]] to ptr +// CHECK1-NEXT: store ptr [[TMP84]], ptr [[TMP83]], align 8 // CHECK1-NEXT: [[TMP85:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP86:%.*]] = load i32, ptr [[TMP85]], align 4 -// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP86]], i32 1) -// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP87]], align 8 -// CHECK1-NEXT: 
[[TMP89:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP89]], align 8 -// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP91:%.*]] = inttoptr i64 [[TMP12]] to ptr -// CHECK1-NEXT: store ptr [[TMP91]], ptr [[TMP90]], align 8 -// CHECK1-NEXT: [[TMP92:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP93:%.*]] = load i32, ptr [[TMP92]], align 4 -// CHECK1-NEXT: [[TMP95:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB3:[0-9]+]], i32 [[TMP93]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP95]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP86:%.*]] = load i32, ptr [[TMP85]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP87:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB3:[0-9]+]], i32 [[TMP86]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP87]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP96:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP97:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP96]], [[TMP97]] +// CHECK1-NEXT: [[TMP88:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP89:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP88]], [[TMP89]] // CHECK1-NEXT: store i32 [[ADD15]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP98:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP12]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP98]] +// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP12]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP90]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE22:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST16:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT20:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP99:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP99]] to i32 -// CHECK1-NEXT: [[TMP100:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP100]] to i32 +// CHECK1-NEXT: [[TMP91:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP91]] to i32 +// CHECK1-NEXT: [[TMP92:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP92]] to i32 // CHECK1-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV]], [[CONV17]] // CHECK1-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8 // CHECK1-NEXT: store i8 [[CONV19]], ptr 
[[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT20]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP98]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP90]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_DONE22]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done22: -// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB3]], i32 [[TMP93]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB3]], i32 [[TMP86]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP101:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP102:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP101]] monotonic, align 4 -// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP12]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP103]] +// CHECK1-NEXT: [[TMP93:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP94:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP93]] monotonic, align 4 +// CHECK1-NEXT: [[TMP95:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP12]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP95]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY23]], label [[OMP_ARRAYCPY_DONE36:%.*]], label [[OMP_ARRAYCPY_BODY24:%.*]] // CHECK1: omp.arraycpy.body24: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST25:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT34:%.*]], [[ATOMIC_EXIT:%.*]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST26:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT33:%.*]], [[ATOMIC_EXIT]] ] -// CHECK1-NEXT: [[TMP104:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1 -// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP104]] to i32 +// CHECK1-NEXT: [[TMP96:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP96]] to i32 // CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]] monotonic, align 1 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] // CHECK1: atomic_cont: -// CHECK1-NEXT: [[TMP105:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP110:%.*]], [[ATOMIC_CONT]] ] -// CHECK1-NEXT: store i8 [[TMP105]], ptr [[_TMP28]], align 1 -// CHECK1-NEXT: [[TMP106:%.*]] = load i8, ptr [[_TMP28]], align 1 -// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP106]] to i32 -// CHECK1-NEXT: [[TMP107:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1 -// CHECK1-NEXT: [[CONV30:%.*]] = sext i8 [[TMP107]] to i32 +// CHECK1-NEXT: [[TMP97:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP102:%.*]], [[ATOMIC_CONT]] ] +// CHECK1-NEXT: store i8 [[TMP97]], ptr [[_TMP28]], align 1 +// CHECK1-NEXT: [[TMP98:%.*]] = load i8, ptr [[_TMP28]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP98]] to i32 +// CHECK1-NEXT: [[TMP99:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV30:%.*]] = sext i8 
[[TMP99]] to i32 // CHECK1-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV29]], [[CONV30]] // CHECK1-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8 // CHECK1-NEXT: store i8 [[CONV32]], ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP108:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP109:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP105]], i8 [[TMP108]] monotonic monotonic, align 1 -// CHECK1-NEXT: [[TMP110]] = extractvalue { i8, i1 } [[TMP109]], 0 -// CHECK1-NEXT: [[TMP111:%.*]] = extractvalue { i8, i1 } [[TMP109]], 1 -// CHECK1-NEXT: br i1 [[TMP111]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] +// CHECK1-NEXT: [[TMP100:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 +// CHECK1-NEXT: [[TMP101:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP97]], i8 [[TMP100]] monotonic monotonic, align 1 +// CHECK1-NEXT: [[TMP102]] = extractvalue { i8, i1 } [[TMP101]], 0 +// CHECK1-NEXT: [[TMP103:%.*]] = extractvalue { i8, i1 } [[TMP101]], 1 +// CHECK1-NEXT: br i1 [[TMP103]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] // CHECK1: atomic_exit: // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT33]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT34]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP103]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP95]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_DONE36]], label [[OMP_ARRAYCPY_BODY24]] // CHECK1: omp.arraycpy.done36: -// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB3]], i32 [[TMP93]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB3]], i32 [[TMP86]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: -// CHECK1-NEXT: [[TMP112:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP112]]) -// CHECK1-NEXT: [[TMP113:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP114:%.*]] = load i32, ptr [[TMP113]], align 4 -// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4:[0-9]+]], i32 [[TMP114]]) +// CHECK1-NEXT: [[TMP104:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP104]]) +// CHECK1-NEXT: [[TMP105:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP106:%.*]] = load i32, ptr [[TMP105]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4:[0-9]+]], i32 [[TMP106]]) // CHECK1-NEXT: ret void // // @@ -332,8 +332,8 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -344,12 +344,12 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, 
ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP5]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -362,7 +362,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP4]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP2]], [[TMP5]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -384,7 +384,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]] @@ -393,9 +393,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP8]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8 @@ -438,65 +438,65 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: 
[[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: call void [[TMP13]](ptr [[TMP14]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]] -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: [[TMP22:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP19]], ptr [[TMP18]]) -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]] -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP31]], i64 9 -// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 -// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i64 [[LB_ADD_LEN_I]] -// CHECK1-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 -// CHECK1-NEXT: [[TMP34:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP35:%.*]] = sub i64 [[TMP33]], [[TMP34]] -// CHECK1-NEXT: [[TMP36:%.*]] = sdiv exact i64 [[TMP35]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to 
i64) -// CHECK1-NEXT: [[TMP37:%.*]] = add nuw i64 [[TMP36]], 1 -// CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: store i64 [[TMP37]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !12 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP39]], ptr [[TMP25]]) -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 8 -// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[TMP43]] to i64 -// CHECK1-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP46:%.*]] = sub i64 [[TMP44]], [[TMP45]] -// CHECK1-NEXT: [[TMP47:%.*]] = sdiv exact i64 [[TMP46]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr i8, ptr [[TMP40]], i64 [[TMP47]] -// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP48]], ptr [[TMP4_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP15]], ptr [[TMP14]]) +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP20:%.*]] = load 
ptr, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP24:%.*]] = sext i32 [[TMP23]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP24]] +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP26]], i64 9 +// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 +// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP27]], i64 [[LB_ADD_LEN_I]] +// CHECK1-NEXT: [[TMP28:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 +// CHECK1-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP30:%.*]] = sub i64 [[TMP28]], [[TMP29]] +// CHECK1-NEXT: [[TMP31:%.*]] = sdiv exact i64 [[TMP30]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP32:%.*]] = add nuw i64 [[TMP31]], 1 +// CHECK1-NEXT: [[TMP33:%.*]] = mul nuw i64 [[TMP32]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: store i64 [[TMP32]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !13 +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP34]], ptr [[TMP20]]) +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP37]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[TMP38]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP41:%.*]] = sub i64 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: [[TMP42:%.*]] = sdiv exact i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP35]], i64 [[TMP42]] +// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP43]], ptr [[TMP4_I]], align 8, !noalias !13 // CHECK1-NEXT: ret i32 0 // // @@ -508,38 +508,38 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 1 +// 
CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 // CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP15]], i64 [[TMP18]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP15]], [[TMP21]] +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[TMP14]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP11]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE5:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: -// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP15]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP23]] to i32 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP18]] to i32 +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP19]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i8 // CHECK1-NEXT: store i8 [[CONV4]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr 
[[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done5: // CHECK1-NEXT: ret void diff --git a/clang/test/OpenMP/irbuilder_safelen.cpp b/clang/test/OpenMP/irbuilder_safelen.cpp --- a/clang/test/OpenMP/irbuilder_safelen.cpp +++ b/clang/test/OpenMP/irbuilder_safelen.cpp @@ -33,7 +33,7 @@ // CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store ptr [[I]], ptr [[TMP0]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], ptr [[AGG_CAPTURED1]], i32 0, i32 0 -// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP1]], align 4 // CHECK-NEXT: call void @__captured_stmt(ptr [[DOTCOUNT_ADDR]], ptr [[AGG_CAPTURED]]) // CHECK-NEXT: [[DOTCOUNT:%.*]] = load i32, ptr [[DOTCOUNT_ADDR]], align 4 @@ -48,29 +48,29 @@ // CHECK-NEXT: br i1 [[OMP_LOOP_CMP]], label [[OMP_LOOP_BODY:%.*]], label [[OMP_LOOP_EXIT:%.*]] // CHECK: omp_loop.body: // CHECK-NEXT: call void @__captured_stmt.1(ptr [[I]], i32 [[OMP_LOOP_IV]], ptr [[AGG_CAPTURED1]]) -// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64 // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i64 [[IDXPROM]] -// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i32 0, i32 0 -// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[A2]], align 4 +// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[A2]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP6]] to float // CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP5]], [[CONV]] -// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[P]], align 8 +// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[P]], align 8, !noundef [[NOUNDEF3]] // CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[TMP7]], i32 0, i32 0 -// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP8]] to float // CHECK-NEXT: [[ADD5:%.*]] = fadd float [[ADD]], [[CONV4]] -// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4 +// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP10]] to i64 // CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[IDXPROM6]] // CHECK-NEXT: store float [[ADD5]], ptr [[ARRAYIDX7]], align 4 // CHECK-NEXT: br label [[OMP_LOOP_INC]] // CHECK: omp_loop.inc: // CHECK-NEXT: [[OMP_LOOP_NEXT]] = add nuw i32 [[OMP_LOOP_IV]], 1 -// CHECK-NEXT: br label [[OMP_LOOP_HEADER]], !llvm.loop 
[[LOOP3:![0-9]+]] +// CHECK-NEXT: br label [[OMP_LOOP_HEADER]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK: omp_loop.exit: // CHECK-NEXT: br label [[OMP_LOOP_AFTER:%.*]] // CHECK: omp_loop.after: @@ -78,7 +78,7 @@ // CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], ptr [[AGG_CAPTURED8]], i32 0, i32 0 // CHECK-NEXT: store ptr [[J]], ptr [[TMP11]], align 8 // CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[AGG_CAPTURED9]], i32 0, i32 0 -// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[J]], align 4 +// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[J]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: store i32 [[TMP13]], ptr [[TMP12]], align 4 // CHECK-NEXT: call void @__captured_stmt.2(ptr [[DOTCOUNT_ADDR10]], ptr [[AGG_CAPTURED8]]) // CHECK-NEXT: [[DOTCOUNT11:%.*]] = load i32, ptr [[DOTCOUNT_ADDR10]], align 4 @@ -92,18 +92,18 @@ // CHECK-NEXT: [[OMP_LOOP_CMP20:%.*]] = icmp ult i32 [[OMP_LOOP_IV19]], [[DOTCOUNT11]] // CHECK-NEXT: br i1 [[OMP_LOOP_CMP20]], label [[OMP_LOOP_BODY15:%.*]], label [[OMP_LOOP_EXIT17:%.*]] // CHECK: omp_loop.body15: -// CHECK-NEXT: call void @__captured_stmt.3(ptr [[J]], i32 [[OMP_LOOP_IV19]], ptr [[AGG_CAPTURED9]]), !llvm.access.group [[ACC_GRP6:![0-9]+]] +// CHECK-NEXT: call void @__captured_stmt.3(ptr [[J]], i32 [[OMP_LOOP_IV19]], ptr [[AGG_CAPTURED9]]), !llvm.access.group [[ACC_GRP7:![0-9]+]] // CHECK-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_P]], ptr [[PP]], i32 0, i32 0 -// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[A22]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP6]] -// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[A22]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[IDXPROM23:%.*]] = sext i32 [[TMP16]] to i64 // CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[IDXPROM23]] -// CHECK-NEXT: store i32 [[TMP14]], ptr [[ARRAYIDX24]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK-NEXT: store i32 [[TMP14]], ptr [[ARRAYIDX24]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK-NEXT: br label [[OMP_LOOP_INC16]] // CHECK: omp_loop.inc16: // CHECK-NEXT: [[OMP_LOOP_NEXT21]] = add nuw i32 [[OMP_LOOP_IV19]], 1 -// CHECK-NEXT: br label [[OMP_LOOP_HEADER13]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK-NEXT: br label [[OMP_LOOP_HEADER13]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK: omp_loop.exit17: // CHECK-NEXT: br label [[OMP_LOOP_AFTER18:%.*]] // CHECK: omp_loop.after18: @@ -128,10 +128,11 @@ //. // CHECK: !0 = !{i32 1, !"wchar_size", i32 4} // CHECK: !1 = !{i32 7, !"openmp", i32 45} -// CHECK: !3 = distinct !{!3, !4, !5} -// CHECK: !4 = !{!"llvm.loop.vectorize.enable", i1 true} -// CHECK: !5 = !{!"llvm.loop.vectorize.width", i32 3} -// CHECK: !6 = distinct !{} -// CHECK: !7 = distinct !{!7, !8, !4} -// CHECK: !8 = !{!"llvm.loop.parallel_accesses", !6} +// CHECK: !3 = !{} +// CHECK: !4 = distinct !{!4, !5, !6} +// CHECK: !5 = !{!"llvm.loop.vectorize.enable", i1 true} +// CHECK: !6 = !{!"llvm.loop.vectorize.width", i32 3} +// CHECK: !7 = distinct !{} +// CHECK: !8 = distinct !{!8, !9, !5} +// CHECK: !9 = !{!"llvm.loop.parallel_accesses", !7} //. 
diff --git a/clang/test/OpenMP/irbuilder_safelen_order_concurrent.cpp b/clang/test/OpenMP/irbuilder_safelen_order_concurrent.cpp --- a/clang/test/OpenMP/irbuilder_safelen_order_concurrent.cpp +++ b/clang/test/OpenMP/irbuilder_safelen_order_concurrent.cpp @@ -33,7 +33,7 @@ // CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store ptr [[I]], ptr [[TMP0]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], ptr [[AGG_CAPTURED1]], i32 0, i32 0 -// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP1]], align 4 // CHECK-NEXT: call void @__captured_stmt(ptr [[DOTCOUNT_ADDR]], ptr [[AGG_CAPTURED]]) // CHECK-NEXT: [[DOTCOUNT:%.*]] = load i32, ptr [[DOTCOUNT_ADDR]], align 4 @@ -47,30 +47,30 @@ // CHECK-NEXT: [[OMP_LOOP_CMP:%.*]] = icmp ult i32 [[OMP_LOOP_IV]], [[DOTCOUNT]] // CHECK-NEXT: br i1 [[OMP_LOOP_CMP]], label [[OMP_LOOP_BODY:%.*]], label [[OMP_LOOP_EXIT:%.*]] // CHECK: omp_loop.body: -// CHECK-NEXT: call void @__captured_stmt.1(ptr [[I]], i32 [[OMP_LOOP_IV]], ptr [[AGG_CAPTURED1]]), !llvm.access.group [[ACC_GRP3:![0-9]+]] -// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group [[ACC_GRP3]] -// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK-NEXT: call void @__captured_stmt.1(ptr [[I]], i32 [[OMP_LOOP_IV]], ptr [[AGG_CAPTURED1]]), !llvm.access.group [[ACC_GRP4:![0-9]+]] +// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64 // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i64 [[IDXPROM]] -// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i32 0, i32 0 -// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[A2]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[A2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP6]] to float // CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP5]], [[CONV]] -// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[P]], align 8, !llvm.access.group [[ACC_GRP3]] +// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[P]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[TMP7]], i32 0, i32 0 -// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP8]] to float // CHECK-NEXT: [[ADD5:%.*]] = fadd float [[ADD]], [[CONV4]] -// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group [[ACC_GRP3]] -// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group 
[[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP10]] to i64 // CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[IDXPROM6]] -// CHECK-NEXT: store float [[ADD5]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK-NEXT: store float [[ADD5]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK-NEXT: br label [[OMP_LOOP_INC]] // CHECK: omp_loop.inc: // CHECK-NEXT: [[OMP_LOOP_NEXT]] = add nuw i32 [[OMP_LOOP_IV]], 1 -// CHECK-NEXT: br label [[OMP_LOOP_HEADER]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK-NEXT: br label [[OMP_LOOP_HEADER]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK: omp_loop.exit: // CHECK-NEXT: br label [[OMP_LOOP_AFTER:%.*]] // CHECK: omp_loop.after: @@ -78,7 +78,7 @@ // CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], ptr [[AGG_CAPTURED8]], i32 0, i32 0 // CHECK-NEXT: store ptr [[J]], ptr [[TMP11]], align 8 // CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[AGG_CAPTURED9]], i32 0, i32 0 -// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[J]], align 4 +// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[J]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: store i32 [[TMP13]], ptr [[TMP12]], align 4 // CHECK-NEXT: call void @__captured_stmt.2(ptr [[DOTCOUNT_ADDR10]], ptr [[AGG_CAPTURED8]]) // CHECK-NEXT: [[DOTCOUNT11:%.*]] = load i32, ptr [[DOTCOUNT_ADDR10]], align 4 @@ -92,18 +92,18 @@ // CHECK-NEXT: [[OMP_LOOP_CMP20:%.*]] = icmp ult i32 [[OMP_LOOP_IV19]], [[DOTCOUNT11]] // CHECK-NEXT: br i1 [[OMP_LOOP_CMP20]], label [[OMP_LOOP_BODY15:%.*]], label [[OMP_LOOP_EXIT17:%.*]] // CHECK: omp_loop.body15: -// CHECK-NEXT: call void @__captured_stmt.3(ptr [[J]], i32 [[OMP_LOOP_IV19]], ptr [[AGG_CAPTURED9]]), !llvm.access.group [[ACC_GRP8:![0-9]+]] +// CHECK-NEXT: call void @__captured_stmt.3(ptr [[J]], i32 [[OMP_LOOP_IV19]], ptr [[AGG_CAPTURED9]]), !llvm.access.group [[ACC_GRP9:![0-9]+]] // CHECK-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_P]], ptr [[PP]], i32 0, i32 0 -// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[A22]], align 4, !llvm.access.group [[ACC_GRP8]] -// CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP8]] -// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[A22]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[IDXPROM23:%.*]] = sext i32 [[TMP16]] to i64 // CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[IDXPROM23]] -// CHECK-NEXT: store i32 [[TMP14]], ptr [[ARRAYIDX24]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK-NEXT: store i32 [[TMP14]], ptr [[ARRAYIDX24]], align 4, !llvm.access.group [[ACC_GRP9]] // CHECK-NEXT: br label [[OMP_LOOP_INC16]] // CHECK: omp_loop.inc16: // CHECK-NEXT: [[OMP_LOOP_NEXT21]] = add nuw i32 [[OMP_LOOP_IV19]], 1 -// CHECK-NEXT: br label [[OMP_LOOP_HEADER13]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK-NEXT: br label [[OMP_LOOP_HEADER13]], !llvm.loop [[LOOP10:![0-9]+]] // CHECK: omp_loop.exit17: // CHECK-NEXT: br label [[OMP_LOOP_AFTER18:%.*]] // CHECK: 
omp_loop.after18: @@ -128,12 +128,13 @@ //. // CHECK: !0 = !{i32 1, !"wchar_size", i32 4} // CHECK: !1 = !{i32 7, !"openmp", i32 50} -// CHECK: !3 = distinct !{} -// CHECK: !4 = distinct !{!4, !5, !6, !7} -// CHECK: !5 = !{!"llvm.loop.parallel_accesses", !3} -// CHECK: !6 = !{!"llvm.loop.vectorize.enable", i1 true} -// CHECK: !7 = !{!"llvm.loop.vectorize.width", i32 3} -// CHECK: !8 = distinct !{} -// CHECK: !9 = distinct !{!9, !10, !6} -// CHECK: !10 = !{!"llvm.loop.parallel_accesses", !8} +// CHECK: !3 = !{} +// CHECK: !4 = distinct !{} +// CHECK: !5 = distinct !{!5, !6, !7, !8} +// CHECK: !6 = !{!"llvm.loop.parallel_accesses", !4} +// CHECK: !7 = !{!"llvm.loop.vectorize.enable", i1 true} +// CHECK: !8 = !{!"llvm.loop.vectorize.width", i32 3} +// CHECK: !9 = distinct !{} +// CHECK: !10 = distinct !{!10, !11, !7} +// CHECK: !11 = !{!"llvm.loop.parallel_accesses", !9} //. diff --git a/clang/test/OpenMP/irbuilder_simdlen.cpp b/clang/test/OpenMP/irbuilder_simdlen.cpp --- a/clang/test/OpenMP/irbuilder_simdlen.cpp +++ b/clang/test/OpenMP/irbuilder_simdlen.cpp @@ -33,7 +33,7 @@ // CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store ptr [[I]], ptr [[TMP0]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], ptr [[AGG_CAPTURED1]], i32 0, i32 0 -// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP1]], align 4 // CHECK-NEXT: call void @__captured_stmt(ptr [[DOTCOUNT_ADDR]], ptr [[AGG_CAPTURED]]) // CHECK-NEXT: [[DOTCOUNT:%.*]] = load i32, ptr [[DOTCOUNT_ADDR]], align 4 @@ -47,30 +47,30 @@ // CHECK-NEXT: [[OMP_LOOP_CMP:%.*]] = icmp ult i32 [[OMP_LOOP_IV]], [[DOTCOUNT]] // CHECK-NEXT: br i1 [[OMP_LOOP_CMP]], label [[OMP_LOOP_BODY:%.*]], label [[OMP_LOOP_EXIT:%.*]] // CHECK: omp_loop.body: -// CHECK-NEXT: call void @__captured_stmt.1(ptr [[I]], i32 [[OMP_LOOP_IV]], ptr [[AGG_CAPTURED1]]), !llvm.access.group [[ACC_GRP3:![0-9]+]] -// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group [[ACC_GRP3]] -// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK-NEXT: call void @__captured_stmt.1(ptr [[I]], i32 [[OMP_LOOP_IV]], ptr [[AGG_CAPTURED1]]), !llvm.access.group [[ACC_GRP4:![0-9]+]] +// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64 // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i64 [[IDXPROM]] -// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i32 0, i32 0 -// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[A2]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[A2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP6]] to float // CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP5]], [[CONV]] -// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[P]], align 8, !llvm.access.group [[ACC_GRP3]] +// 
CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[P]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[TMP7]], i32 0, i32 0 -// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP8]] to float // CHECK-NEXT: [[ADD5:%.*]] = fadd float [[ADD]], [[CONV4]] -// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group [[ACC_GRP3]] -// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP10]] to i64 // CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[IDXPROM6]] -// CHECK-NEXT: store float [[ADD5]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK-NEXT: store float [[ADD5]], ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK-NEXT: br label [[OMP_LOOP_INC]] // CHECK: omp_loop.inc: // CHECK-NEXT: [[OMP_LOOP_NEXT]] = add nuw i32 [[OMP_LOOP_IV]], 1 -// CHECK-NEXT: br label [[OMP_LOOP_HEADER]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK-NEXT: br label [[OMP_LOOP_HEADER]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK: omp_loop.exit: // CHECK-NEXT: br label [[OMP_LOOP_AFTER:%.*]] // CHECK: omp_loop.after: @@ -78,7 +78,7 @@ // CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], ptr [[AGG_CAPTURED8]], i32 0, i32 0 // CHECK-NEXT: store ptr [[J]], ptr [[TMP11]], align 8 // CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[AGG_CAPTURED9]], i32 0, i32 0 -// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[J]], align 4 +// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[J]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: store i32 [[TMP13]], ptr [[TMP12]], align 4 // CHECK-NEXT: call void @__captured_stmt.2(ptr [[DOTCOUNT_ADDR10]], ptr [[AGG_CAPTURED8]]) // CHECK-NEXT: [[DOTCOUNT11:%.*]] = load i32, ptr [[DOTCOUNT_ADDR10]], align 4 @@ -92,18 +92,18 @@ // CHECK-NEXT: [[OMP_LOOP_CMP20:%.*]] = icmp ult i32 [[OMP_LOOP_IV19]], [[DOTCOUNT11]] // CHECK-NEXT: br i1 [[OMP_LOOP_CMP20]], label [[OMP_LOOP_BODY15:%.*]], label [[OMP_LOOP_EXIT17:%.*]] // CHECK: omp_loop.body15: -// CHECK-NEXT: call void @__captured_stmt.3(ptr [[J]], i32 [[OMP_LOOP_IV19]], ptr [[AGG_CAPTURED9]]), !llvm.access.group [[ACC_GRP8:![0-9]+]] +// CHECK-NEXT: call void @__captured_stmt.3(ptr [[J]], i32 [[OMP_LOOP_IV19]], ptr [[AGG_CAPTURED9]]), !llvm.access.group [[ACC_GRP9:![0-9]+]] // CHECK-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_P]], ptr [[PP]], i32 0, i32 0 -// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[A22]], align 4, !llvm.access.group [[ACC_GRP8]] -// CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP8]] -// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[A22]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr 
[[J]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF3]] // CHECK-NEXT: [[IDXPROM23:%.*]] = sext i32 [[TMP16]] to i64 // CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[IDXPROM23]] -// CHECK-NEXT: store i32 [[TMP14]], ptr [[ARRAYIDX24]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK-NEXT: store i32 [[TMP14]], ptr [[ARRAYIDX24]], align 4, !llvm.access.group [[ACC_GRP9]] // CHECK-NEXT: br label [[OMP_LOOP_INC16]] // CHECK: omp_loop.inc16: // CHECK-NEXT: [[OMP_LOOP_NEXT21]] = add nuw i32 [[OMP_LOOP_IV19]], 1 -// CHECK-NEXT: br label [[OMP_LOOP_HEADER13]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK-NEXT: br label [[OMP_LOOP_HEADER13]], !llvm.loop [[LOOP10:![0-9]+]] // CHECK: omp_loop.exit17: // CHECK-NEXT: br label [[OMP_LOOP_AFTER18:%.*]] // CHECK: omp_loop.after18: @@ -128,12 +128,13 @@ //. // CHECK: !0 = !{i32 1, !"wchar_size", i32 4} // CHECK: !1 = !{i32 7, !"openmp", i32 45} -// CHECK: !3 = distinct !{} -// CHECK: !4 = distinct !{!4, !5, !6, !7} -// CHECK: !5 = !{!"llvm.loop.parallel_accesses", !3} -// CHECK: !6 = !{!"llvm.loop.vectorize.enable", i1 true} -// CHECK: !7 = !{!"llvm.loop.vectorize.width", i32 3} -// CHECK: !8 = distinct !{} -// CHECK: !9 = distinct !{!9, !10, !6} -// CHECK: !10 = !{!"llvm.loop.parallel_accesses", !8} +// CHECK: !3 = !{} +// CHECK: !4 = distinct !{} +// CHECK: !5 = distinct !{!5, !6, !7, !8} +// CHECK: !6 = !{!"llvm.loop.parallel_accesses", !4} +// CHECK: !7 = !{!"llvm.loop.vectorize.enable", i1 true} +// CHECK: !8 = !{!"llvm.loop.vectorize.width", i32 3} +// CHECK: !9 = distinct !{} +// CHECK: !10 = distinct !{!10, !11, !7} +// CHECK: !11 = !{!"llvm.loop.parallel_accesses", !9} //. diff --git a/clang/test/OpenMP/irbuilder_simdlen_safelen.cpp b/clang/test/OpenMP/irbuilder_simdlen_safelen.cpp --- a/clang/test/OpenMP/irbuilder_simdlen_safelen.cpp +++ b/clang/test/OpenMP/irbuilder_simdlen_safelen.cpp @@ -33,7 +33,7 @@ // CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store ptr [[I]], ptr [[TMP0]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], ptr [[AGG_CAPTURED1]], i32 0, i32 0 -// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK-NEXT: store i32 [[TMP2]], ptr [[TMP1]], align 4 // CHECK-NEXT: call void @__captured_stmt(ptr [[DOTCOUNT_ADDR]], ptr [[AGG_CAPTURED]]) // CHECK-NEXT: [[DOTCOUNT:%.*]] = load i32, ptr [[DOTCOUNT_ADDR]], align 4 @@ -48,29 +48,29 @@ // CHECK-NEXT: br i1 [[OMP_LOOP_CMP]], label [[OMP_LOOP_BODY:%.*]], label [[OMP_LOOP_EXIT:%.*]] // CHECK: omp_loop.body: // CHECK-NEXT: call void @__captured_stmt.1(ptr [[I]], i32 [[OMP_LOOP_IV]], ptr [[AGG_CAPTURED1]]) -// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64 // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i64 [[IDXPROM]] -// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[S]], i32 0, i32 0 -// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[A2]], 
align 4 +// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[A2]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP6]] to float // CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP5]], [[CONV]] -// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[P]], align 8 +// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[P]], align 8, !noundef [[NOUNDEF3]] // CHECK-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[TMP7]], i32 0, i32 0 -// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP8]] to float // CHECK-NEXT: [[ADD5:%.*]] = fadd float [[ADD]], [[CONV4]] -// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4 +// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP10]] to i64 // CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[IDXPROM6]] // CHECK-NEXT: store float [[ADD5]], ptr [[ARRAYIDX7]], align 4 // CHECK-NEXT: br label [[OMP_LOOP_INC]] // CHECK: omp_loop.inc: // CHECK-NEXT: [[OMP_LOOP_NEXT]] = add nuw i32 [[OMP_LOOP_IV]], 1 -// CHECK-NEXT: br label [[OMP_LOOP_HEADER]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK-NEXT: br label [[OMP_LOOP_HEADER]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK: omp_loop.exit: // CHECK-NEXT: br label [[OMP_LOOP_AFTER:%.*]] // CHECK: omp_loop.after: @@ -78,7 +78,7 @@ // CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], ptr [[AGG_CAPTURED8]], i32 0, i32 0 // CHECK-NEXT: store ptr [[J]], ptr [[TMP11]], align 8 // CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[AGG_CAPTURED9]], i32 0, i32 0 -// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[J]], align 4 +// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[J]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: store i32 [[TMP13]], ptr [[TMP12]], align 4 // CHECK-NEXT: call void @__captured_stmt.2(ptr [[DOTCOUNT_ADDR10]], ptr [[AGG_CAPTURED8]]) // CHECK-NEXT: [[DOTCOUNT11:%.*]] = load i32, ptr [[DOTCOUNT_ADDR10]], align 4 @@ -92,18 +92,18 @@ // CHECK-NEXT: [[OMP_LOOP_CMP20:%.*]] = icmp ult i32 [[OMP_LOOP_IV19]], [[DOTCOUNT11]] // CHECK-NEXT: br i1 [[OMP_LOOP_CMP20]], label [[OMP_LOOP_BODY15:%.*]], label [[OMP_LOOP_EXIT17:%.*]] // CHECK: omp_loop.body15: -// CHECK-NEXT: call void @__captured_stmt.3(ptr [[J]], i32 [[OMP_LOOP_IV19]], ptr [[AGG_CAPTURED9]]), !llvm.access.group [[ACC_GRP6:![0-9]+]] +// CHECK-NEXT: call void @__captured_stmt.3(ptr [[J]], i32 [[OMP_LOOP_IV19]], ptr [[AGG_CAPTURED9]]), !llvm.access.group [[ACC_GRP7:![0-9]+]] // CHECK-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_P]], ptr [[PP]], i32 0, i32 0 -// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[A22]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP6]] -// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[A22]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF3]] // CHECK-NEXT: 
[[IDXPROM23:%.*]] = sext i32 [[TMP16]] to i64 // CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[IDXPROM23]] -// CHECK-NEXT: store i32 [[TMP14]], ptr [[ARRAYIDX24]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK-NEXT: store i32 [[TMP14]], ptr [[ARRAYIDX24]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK-NEXT: br label [[OMP_LOOP_INC16]] // CHECK: omp_loop.inc16: // CHECK-NEXT: [[OMP_LOOP_NEXT21]] = add nuw i32 [[OMP_LOOP_IV19]], 1 -// CHECK-NEXT: br label [[OMP_LOOP_HEADER13]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK-NEXT: br label [[OMP_LOOP_HEADER13]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK: omp_loop.exit17: // CHECK-NEXT: br label [[OMP_LOOP_AFTER18:%.*]] // CHECK: omp_loop.after18: @@ -128,10 +128,11 @@ //. // CHECK: !0 = !{i32 1, !"wchar_size", i32 4} // CHECK: !1 = !{i32 7, !"openmp", i32 45} -// CHECK: !3 = distinct !{!3, !4, !5} -// CHECK: !4 = !{!"llvm.loop.vectorize.enable", i1 true} -// CHECK: !5 = !{!"llvm.loop.vectorize.width", i32 2} -// CHECK: !6 = distinct !{} -// CHECK: !7 = distinct !{!7, !8, !4} -// CHECK: !8 = !{!"llvm.loop.parallel_accesses", !6} +// CHECK: !3 = !{} +// CHECK: !4 = distinct !{!4, !5, !6} +// CHECK: !5 = !{!"llvm.loop.vectorize.enable", i1 true} +// CHECK: !6 = !{!"llvm.loop.vectorize.width", i32 2} +// CHECK: !7 = distinct !{} +// CHECK: !8 = distinct !{!8, !9, !5} +// CHECK: !9 = !{!"llvm.loop.parallel_accesses", !7} //. diff --git a/clang/test/OpenMP/master_taskloop_in_reduction_codegen.cpp b/clang/test/OpenMP/master_taskloop_in_reduction_codegen.cpp --- a/clang/test/OpenMP/master_taskloop_in_reduction_codegen.cpp +++ b/clang/test/OpenMP/master_taskloop_in_reduction_codegen.cpp @@ -69,7 +69,7 @@ // CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq ptr [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] // CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] // CHECK1: arrayctor.cont: -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 @@ -209,8 +209,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -237,8 +237,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load float, ptr [[TMP2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load float, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP4]], [[TMP5]] // CHECK1-NEXT: store float [[ADD]], ptr 
[[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -265,8 +265,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -385,7 +385,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP2]], ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i16, ptr [[TMP3]], i64 [[TMP5]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP3]], [[TMP6]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -408,7 +408,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP2]], ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr i16, ptr [[TMP5]], i64 [[TMP4]] @@ -417,9 +417,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP6]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2 +// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 -// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2 +// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i16 [[TMP9]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 @@ -452,12 +452,12 @@ // CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[DOTTASK_RED__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTTASK_RED_1]], ptr [[DOTTASK_RED__ADDR2]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr 
[[DOTTASK_RED__ADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTTASK_RED__ADDR2]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP6]]) // CHECK1-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK1-NEXT: br i1 [[TMP8]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -476,14 +476,14 @@ // CHECK1-NEXT: [[TMP14:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP6]], i32 1, i64 96, i64 40, ptr @.omp_task_entry.) // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP14]], i32 0, i32 0 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP15]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP17]], ptr align 8 [[AGG_CAPTURED]], i64 40, i1 false) // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP14]], i32 0, i32 1 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP18]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP19]], align 8 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP18]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP4]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store ptr [[TMP22]], ptr [[TMP21]], align 8 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP15]], i32 0, i32 5 // CHECK1-NEXT: store i64 0, ptr [[TMP23]], align 8 @@ -493,7 +493,7 @@ // CHECK1-NEXT: store i64 1, ptr [[TMP25]], align 8 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP15]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP26]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP27:%.*]] = load i64, ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = load i64, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP6]], ptr [[TMP14]], i32 1, ptr [[TMP23]], ptr [[TMP24]], i64 [[TMP27]], i32 1, i32 0, i64 0, ptr null) // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP6]]) // CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP6]]) @@ -543,50 +543,50 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], 
i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void 
@llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP19]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP20]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 // CHECK1-NEXT: call void [[TMP22]](ptr [[TMP23]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3]] -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !15 // CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP24]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP29:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP28]], ptr [[TMP27]], ptr [[TMP26]]) // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP19]], i32 0, i32 2 // CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 @@ -594,32 +594,32 @@ // CHECK1-NEXT: [[TMP33:%.*]] = udiv exact i64 [[TMP32]], ptrtoint (ptr getelementptr (i16, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP34:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP28]], ptr @{{reduction_size[.].+[.]}}) // CHECK1-NEXT: store i64 [[TMP33]], ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP25]], 
align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP36:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP28]], ptr [[TMP35]], ptr [[TMP31]]) -// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP37]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2_I:%.*]] = sext i32 [[TMP38]] to i64 -// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP39]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__9_EXIT:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 -// CHECK1-NEXT: store i32 [[TMP40]], ptr [[I_I]], align 4, !noalias !14 -// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP29]], align 4 +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP40]], ptr [[I_I]], align 4, !noalias !15 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP29]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM_I:%.*]] = sext i32 [[TMP41]] to i64 // CHECK1-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds i16, ptr [[TMP36]], i64 [[IDXPROM_I]] -// CHECK1-NEXT: [[TMP42:%.*]] = load i16, ptr [[ARRAYIDX_I]], align 2 +// CHECK1-NEXT: [[TMP42:%.*]] = load i16, ptr [[ARRAYIDX_I]], align 2, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV3_I:%.*]] = sext i16 [[TMP42]] to i32 -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[TMP29]], align 4 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[TMP29]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD4_I:%.*]] = add nsw i32 [[TMP43]], [[CONV3_I]] // CHECK1-NEXT: store i32 [[ADD4_I]], ptr [[TMP29]], align 4 -// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD5_I:%.*]] = add nsw i32 [[TMP44]], 1 -// CHECK1-NEXT: store i32 [[ADD5_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 [[ADD5_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]] // CHECK1: .omp_outlined..9.exit: // CHECK1-NEXT: ret i32 0 diff --git a/clang/test/OpenMP/master_taskloop_simd_in_reduction_codegen.cpp b/clang/test/OpenMP/master_taskloop_simd_in_reduction_codegen.cpp --- a/clang/test/OpenMP/master_taskloop_simd_in_reduction_codegen.cpp +++ b/clang/test/OpenMP/master_taskloop_simd_in_reduction_codegen.cpp @@ -69,7 +69,7 @@ // CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq ptr [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] // CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] // CHECK1: arrayctor.cont: -// CHECK1-NEXT: 
[[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 @@ -209,8 +209,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -237,8 +237,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load float, ptr [[TMP2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load float, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP4]], [[TMP5]] // CHECK1-NEXT: store float [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -265,8 +265,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -385,7 +385,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP2]], ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i16, ptr [[TMP3]], i64 [[TMP5]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP3]], [[TMP6]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -408,7 +408,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP2]], ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = load 
ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr i16, ptr [[TMP5]], i64 [[TMP4]] @@ -417,9 +417,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP6]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2 +// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 -// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2 +// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i16 [[TMP9]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 @@ -452,12 +452,12 @@ // CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[DOTTASK_RED__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTTASK_RED_1]], ptr [[DOTTASK_RED__ADDR2]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTTASK_RED__ADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTTASK_RED__ADDR2]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP6]]) // CHECK1-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK1-NEXT: br i1 [[TMP8]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -476,14 +476,14 @@ // CHECK1-NEXT: [[TMP14:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP6]], i32 1, i64 96, i64 40, ptr @.omp_task_entry.) 
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP14]], i32 0, i32 0
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP15]], i32 0, i32 0
-// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8
+// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP17]], ptr align 8 [[AGG_CAPTURED]], i64 40, i1 false)
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP14]], i32 0, i32 1
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP18]], i32 0, i32 0
-// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8
+// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP19]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP18]], i32 0, i32 1
-// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP4]], align 8
+// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP4]], align 8, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: store ptr [[TMP22]], ptr [[TMP21]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP15]], i32 0, i32 5
// CHECK1-NEXT: store i64 0, ptr [[TMP23]], align 8
@@ -493,7 +493,7 @@
// CHECK1-NEXT: store i64 1, ptr [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP15]], i32 0, i32 9
// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP26]], i8 0, i64 8, i1 false)
-// CHECK1-NEXT: [[TMP27:%.*]] = load i64, ptr [[TMP25]], align 8
+// CHECK1-NEXT: [[TMP27:%.*]] = load i64, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP6]], ptr [[TMP14]], i32 1, ptr [[TMP23]], ptr [[TMP24]], i64 [[TMP27]], i32 1, i32 0, i64 0, ptr null)
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP6]])
// CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP6]])
@@ -543,50 +543,50 @@
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
-// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0
-// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
+// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5
-// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8
+// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6
-// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8
+// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7
-// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8
+// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8
-// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8
+// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9
-// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8
-// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
-// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
-// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
-// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
-// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
-// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
-// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14
-// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
-// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
-// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14
-// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14
-// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14
-// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !14
-// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14
-// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
-// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14
-// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14
+// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]]
+// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]])
+// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]])
+// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]])
+// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]])
+// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]])
+// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15
+// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15
+// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15
+// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15
+// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15
+// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15
+// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8,
!noalias !15 +// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP19]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP20]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 // CHECK1-NEXT: call void [[TMP22]](ptr [[TMP23]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3]] -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !15 // CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP24]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP29:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP28]], ptr [[TMP27]], ptr [[TMP26]]) // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP19]], i32 0, i32 2 // CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 @@ -594,33 +594,33 @@ // CHECK1-NEXT: [[TMP33:%.*]] = udiv exact i64 [[TMP32]], ptrtoint (ptr getelementptr (i16, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP34:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP28]], ptr @{{reduction_size[.].+[.]}}) // CHECK1-NEXT: store i64 [[TMP33]], ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP36:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP28]], ptr [[TMP35]], ptr [[TMP31]]) -// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP37]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group 
[[ACC_GRP15:![0-9]+]] +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2_I:%.*]] = sext i32 [[TMP38]] to i64 -// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP39]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__9_EXIT:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: store i32 [[TMP40]], ptr [[I_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP29]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP40]], ptr [[I_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP29]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM_I:%.*]] = sext i32 [[TMP41]] to i64 // CHECK1-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds i16, ptr [[TMP36]], i64 [[IDXPROM_I]] -// CHECK1-NEXT: [[TMP42:%.*]] = load i16, ptr [[ARRAYIDX_I]], align 2, !llvm.access.group [[ACC_GRP15]] +// CHECK1-NEXT: [[TMP42:%.*]] = load i16, ptr [[ARRAYIDX_I]], align 2, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV3_I:%.*]] = sext i16 [[TMP42]] to i32 -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[TMP29]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[TMP29]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD4_I:%.*]] = add nsw i32 [[TMP43]], [[CONV3_I]] -// CHECK1-NEXT: store i32 [[ADD4_I]], ptr [[TMP29]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK1-NEXT: store i32 [[ADD4_I]], ptr [[TMP29]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD5_I:%.*]] = add nsw i32 [[TMP44]], 1 -// CHECK1-NEXT: store i32 [[ADD5_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD5_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK1: .omp_outlined..9.exit: // CHECK1-NEXT: ret i32 0 // @@ -700,7 +700,7 @@ // CHECK3-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq ptr [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] // CHECK3-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] // CHECK3: arrayctor.cont: -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK3-NEXT: 
[[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 @@ -708,37 +708,37 @@ // CHECK3-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 // CHECK3-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK3-NEXT: store i64 4, ptr [[DOTOMP_UB]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP3]] to i32 // CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CONV2:%.*]] = sext i32 [[TMP4]] to i64 -// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP2]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP5]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP6]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP7]] to i64 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[IDXPROM]] -// CHECK3-NEXT: [[TMP8:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP2]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CONV3:%.*]] = sext i16 [[TMP8]] to i32 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], [[CONV3]] -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], 
align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: store i32 5, ptr [[I]], align 4 // CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 diff --git a/clang/test/OpenMP/nvptx_target_parallel_reduction_codegen_tbaa_PR46146.cpp b/clang/test/OpenMP/nvptx_target_parallel_reduction_codegen_tbaa_PR46146.cpp --- a/clang/test/OpenMP/nvptx_target_parallel_reduction_codegen_tbaa_PR46146.cpp +++ b/clang/test/OpenMP/nvptx_target_parallel_reduction_codegen_tbaa_PR46146.cpp @@ -79,68 +79,68 @@ // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR6]] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[IB]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 99 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14:![0-9]+]] +// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ] +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]], 
!noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK1: omp.inner.for.cond.cleanup: // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP13]], 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[IB]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP]]) #[[ATTR6]] -// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP]], align 4, !tbaa [[TBAA14:![0-9]+]] +// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP]], align 4, !tbaa [[TBAA15:![0-9]+]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP2]]) #[[ATTR6]] -// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP2]], align 4, !tbaa [[TBAA14]] +// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP2]], align 4, !tbaa [[TBAA15]] // CHECK1-NEXT: call void @_ZNSt7complexIfEC1ERKfS2_(ptr nonnull align 4 dereferenceable(8) [[PARTIAL_SUM]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP2]]) #[[ATTR12:[0-9]+]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP2]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[IB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP18]], 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[IB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP8]], 4 // CHECK1-NEXT: store i32 [[MUL3]], ptr [[ISTART]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[IB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP19]], 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[IB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[ADD4]], 4 // CHECK1-NEXT: store i32 [[MUL5]], ptr [[IEND]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ISTART]], ptr [[TMP20]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[IEND]], ptr [[TMP22]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2 -// CHECK1-NEXT: store ptr [[PARTIAL_SUM]], ptr [[TMP24]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP7]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__1, ptr @__omp_outlined__1_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 3) +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ISTART]], ptr [[TMP10]], align 8, !tbaa [[TBAA12]] +// 
CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[IEND]], ptr [[TMP11]], align 8, !tbaa [[TBAA12]] +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2 +// CHECK1-NEXT: store ptr [[PARTIAL_SUM]], ptr [[TMP12]], align 8, !tbaa [[TBAA12]] +// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__1, ptr @__omp_outlined__1_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 3) // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP27]], 1 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP13]], 1 // CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP7]]) +// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP1]]) // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[IB]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR6]] @@ -205,99 +205,99 @@ // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[PARTIAL_SUM_ADDR]], align 8, !tbaa [[TBAA12]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP1]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP1]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP9]], [[TMP10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add i32 
[[SUB3]], 1 // CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: store i32 [[TMP12]], ptr [[I]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store i32 [[TMP7]], ptr [[I]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP14]], [[TMP15]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1: omp.precond.then: // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR6]] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: store i32 [[TMP18]], ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR6]] // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR6]] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[PARTIAL_SUM5]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP]]) #[[ATTR6]] -// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP]], align 4, !tbaa [[TBAA14]] +// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP]], align 4, !tbaa [[TBAA15]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP6]]) #[[ATTR6]] -// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP6]], align 4, !tbaa [[TBAA14]] +// CHECK1-NEXT: store float 0.000000e+00, ptr [[REF_TMP6]], align 4, !tbaa [[TBAA15]] // CHECK1-NEXT: call void @_ZNSt7complexIfEC1ERKfS2_(ptr nonnull align 4 dereferenceable(8) [[PARTIAL_SUM5]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP6]]) #[[ATTR12]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP6]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I7]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, 
ptr [[TMP27]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB3:[0-9]+]], i32 [[TMP28]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB3:[0-9]+]], i32 [[TMP12]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK1: omp.dispatch.cond: -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[CMP8:%.*]] = icmp ugt i32 [[TMP29]], [[TMP30]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[CMP8:%.*]] = icmp ugt i32 [[TMP13]], [[TMP14]] // CHECK1-NEXT: br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE]] ], [ [[TMP32]], [[COND_FALSE]] ] +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: store i32 [[TMP33]], ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[ADD9:%.*]] = add i32 [[TMP35]], 1 -// CHECK1-NEXT: [[CMP10:%.*]] = icmp ult i32 [[TMP34]], [[ADD9]] +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store i32 [[TMP17]], ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[ADD9:%.*]] = add i32 [[TMP19]], 1 +// CHECK1-NEXT: [[CMP10:%.*]] = icmp ult i32 [[TMP18]], [[ADD9]] // CHECK1-NEXT: br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_CLEANUP:%.*]] // CHECK1: omp.dispatch.cleanup: // CHECK1-NEXT: br label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// 
CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[ADD11:%.*]] = add i32 [[TMP37]], 1 -// CHECK1-NEXT: [[CMP12:%.*]] = icmp ult i32 [[TMP36]], [[ADD11]] +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[ADD11:%.*]] = add i32 [[TMP21]], 1 +// CHECK1-NEXT: [[CMP12:%.*]] = icmp ult i32 [[TMP20]], [[ADD11]] // CHECK1-NEXT: br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK1: omp.inner.for.cond.cleanup: // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP39]], 1 -// CHECK1-NEXT: [[ADD13:%.*]] = add i32 [[TMP38]], [[MUL]] +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP23]], 1 +// CHECK1-NEXT: [[ADD13:%.*]] = add i32 [[TMP22]], [[MUL]] // CHECK1-NEXT: store i32 [[ADD13]], ptr [[I7]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP14]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP15]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP42]] to float -// CHECK1-NEXT: store float [[CONV]], ptr [[REF_TMP15]], align 4, !tbaa [[TBAA14]] +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP24]] to float +// CHECK1-NEXT: store float [[CONV]], ptr [[REF_TMP15]], align 4, !tbaa [[TBAA15]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[REF_TMP16]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[CONV17:%.*]] = sitofp i32 [[TMP44]] to float -// CHECK1-NEXT: store float [[CONV17]], ptr [[REF_TMP16]], align 4, !tbaa [[TBAA14]] +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[CONV17:%.*]] = sitofp i32 [[TMP25]] to float +// CHECK1-NEXT: store float [[CONV17]], ptr [[REF_TMP16]], align 4, !tbaa [[TBAA15]] // CHECK1-NEXT: call void @_ZNSt7complexIfEC1ERKfS2_(ptr nonnull align 4 dereferenceable(8) [[REF_TMP14]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP15]], ptr nonnull align 4 dereferenceable(4) [[REF_TMP16]]) #[[ATTR12]] // CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) ptr @_ZNSt7complexIfEpLIfEERS0_RKS_IT_E(ptr nonnull align 4 dereferenceable(8) [[PARTIAL_SUM5]], ptr nonnull align 4 dereferenceable(8) [[REF_TMP14]]) #[[ATTR12]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[REF_TMP16]]) #[[ATTR6]] @@ -307,36 +307,36 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[ADD18:%.*]] = add i32 [[TMP48]], 1 +// CHECK1-NEXT: 
[[TMP26:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[ADD18:%.*]] = add i32 [[TMP26]], 1 // CHECK1-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK1: omp.dispatch.inc: -// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[ADD19:%.*]] = add i32 [[TMP49]], [[TMP50]] +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[ADD19:%.*]] = add i32 [[TMP27]], [[TMP28]] // CHECK1-NEXT: store i32 [[ADD19]], ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[ADD20:%.*]] = add i32 [[TMP51]], [[TMP52]] +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[ADD20:%.*]] = add i32 [[TMP29]], [[TMP30]] // CHECK1-NEXT: store i32 [[ADD20]], ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK1: omp.dispatch.end: -// CHECK1-NEXT: [[TMP53:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[TMP53]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP54]]) -// CHECK1-NEXT: [[TMP55:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[TMP55]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[PARTIAL_SUM5]], ptr [[TMP57]], align 8 -// CHECK1-NEXT: [[TMP60:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i32 [[TMP56]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func, ptr @_omp_reduction_inter_warp_copy_func) -// CHECK1-NEXT: [[TMP61:%.*]] = icmp eq i32 [[TMP60]], 1 -// CHECK1-NEXT: br i1 [[TMP61]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]] +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP32]]) +// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[PARTIAL_SUM5]], ptr [[TMP35]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i32 [[TMP34]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func, ptr 
@_omp_reduction_inter_warp_copy_func) +// CHECK1-NEXT: [[TMP37:%.*]] = icmp eq i32 [[TMP36]], 1 +// CHECK1-NEXT: br i1 [[TMP37]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]] // CHECK1: .omp.reduction.then: // CHECK1-NEXT: [[CALL21:%.*]] = call nonnull align 4 dereferenceable(8) ptr @_ZNSt7complexIfEpLIfEERS0_RKS_IT_E(ptr nonnull align 4 dereferenceable(8) [[TMP2]], ptr nonnull align 4 dereferenceable(8) [[PARTIAL_SUM5]]) #[[ATTR12]] -// CHECK1-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP56]]) +// CHECK1-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP34]]) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DONE]] // CHECK1: .omp.reduction.done: // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I7]]) #[[ATTR6]] @@ -365,15 +365,15 @@ // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[__C_ADDR]], align 8, !tbaa [[TBAA12]] // CHECK1-NEXT: [[CALL:%.*]] = call float @_ZNKSt7complexIfE4realEv(ptr nonnull align 4 dereferenceable(8) [[TMP0]]) #[[ATTR12]] // CHECK1-NEXT: [[__RE_:%.*]] = getelementptr inbounds %"class.std::complex", ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP1:%.*]] = load float, ptr [[__RE_]], align 4, !tbaa [[TBAA16:![0-9]+]] +// CHECK1-NEXT: [[TMP1:%.*]] = load float, ptr [[__RE_]], align 4, !tbaa [[TBAA17:![0-9]+]], !noundef [[NOUNDEF14]] // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP1]], [[CALL]] -// CHECK1-NEXT: store float [[ADD]], ptr [[__RE_]], align 4, !tbaa [[TBAA16]] +// CHECK1-NEXT: store float [[ADD]], ptr [[__RE_]], align 4, !tbaa [[TBAA17]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__C_ADDR]], align 8, !tbaa [[TBAA12]] // CHECK1-NEXT: [[CALL2:%.*]] = call float @_ZNKSt7complexIfE4imagEv(ptr nonnull align 4 dereferenceable(8) [[TMP2]]) #[[ATTR12]] // CHECK1-NEXT: [[__IM_:%.*]] = getelementptr inbounds %"class.std::complex", ptr [[THIS1]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP3:%.*]] = load float, ptr [[__IM_]], align 4, !tbaa [[TBAA18:![0-9]+]] +// CHECK1-NEXT: [[TMP3:%.*]] = load float, ptr [[__IM_]], align 4, !tbaa [[TBAA19:![0-9]+]], !noundef [[NOUNDEF14]] // CHECK1-NEXT: [[ADD3:%.*]] = fadd float [[TMP3]], [[CALL2]] -// CHECK1-NEXT: store float [[ADD3]], ptr [[__IM_]], align 4, !tbaa [[TBAA18]] +// CHECK1-NEXT: store float [[ADD3]], ptr [[__IM_]], align 4, !tbaa [[TBAA19]] // CHECK1-NEXT: ret ptr [[THIS1]] // // @@ -387,54 +387,54 @@ // CHECK1-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x ptr], align 8 // CHECK1-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca %"class.std::complex", align 4 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2, !tbaa [[TBAA19:![0-9]+]] -// CHECK1-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2, !tbaa [[TBAA19]] -// CHECK1-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2, !tbaa [[TBAA19]] -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR1]], align 2, !tbaa [[TBAA19]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR2]], align 2, !tbaa [[TBAA19]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[DOTADDR3]], align 2, !tbaa [[TBAA19]] -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr 
%"class.std::complex", ptr [[TMP11]], i64 1 -// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_get_warp_size() -// CHECK1-NEXT: [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16 -// CHECK1-NEXT: [[TMP20:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP17]], i16 [[TMP7]], i16 [[TMP19]]) -// CHECK1-NEXT: store i64 [[TMP20]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[TMP11]], i64 1 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr i64, ptr [[DOTOMP_REDUCTION_ELEMENT]], i64 1 -// CHECK1-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP12]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP8]], 0 -// CHECK1-NEXT: [[TMP25:%.*]] = icmp eq i16 [[TMP8]], 1 -// CHECK1-NEXT: [[TMP26:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]] +// CHECK1-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2, !tbaa [[TBAA20:![0-9]+]] +// CHECK1-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2, !tbaa [[TBAA20]] +// CHECK1-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2, !tbaa [[TBAA20]] +// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !tbaa [[TBAA12]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2, !tbaa [[TBAA20]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2, !tbaa [[TBAA20]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2, !tbaa [[TBAA20]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr %"class.std::complex", ptr [[TMP9]], i64 1 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP9]], align 4, !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size() +// CHECK1-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16 +// CHECK1-NEXT: [[TMP15:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]]) +// CHECK1-NEXT: store i64 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[TMP9]], i64 1 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[DOTOMP_REDUCTION_ELEMENT]], i64 1 +// CHECK1-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 8, !tbaa [[TBAA12]] +// CHECK1-NEXT: [[TMP18:%.*]] = icmp eq i16 [[TMP7]], 0 +// CHECK1-NEXT: [[TMP19:%.*]] = icmp eq i16 [[TMP7]], 1 +// CHECK1-NEXT: [[TMP20:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]] +// CHECK1-NEXT: [[TMP21:%.*]] = and i1 [[TMP19]], [[TMP20]] +// CHECK1-NEXT: [[TMP22:%.*]] = icmp eq i16 [[TMP7]], 2 +// CHECK1-NEXT: [[TMP23:%.*]] = and i16 [[TMP5]], 1 +// CHECK1-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP23]], 0 +// CHECK1-NEXT: [[TMP25:%.*]] = and i1 [[TMP22]], [[TMP24]] +// CHECK1-NEXT: [[TMP26:%.*]] = icmp sgt i16 [[TMP6]], 0 // CHECK1-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]] -// CHECK1-NEXT: [[TMP28:%.*]] = icmp eq i16 [[TMP8]], 2 -// CHECK1-NEXT: [[TMP29:%.*]] = and i16 [[TMP6]], 1 -// CHECK1-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP29]], 0 -// CHECK1-NEXT: [[TMP31:%.*]] = and i1 [[TMP28]], [[TMP30]] -// CHECK1-NEXT: [[TMP32:%.*]] = icmp sgt i16 [[TMP7]], 0 -// CHECK1-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]] -// CHECK1-NEXT: 
[[TMP34:%.*]] = or i1 [[TMP24]], [[TMP27]] -// CHECK1-NEXT: [[TMP35:%.*]] = or i1 [[TMP34]], [[TMP33]] -// CHECK1-NEXT: br i1 [[TMP35]], label [[THEN:%.*]], label [[ELSE:%.*]] +// CHECK1-NEXT: [[TMP28:%.*]] = or i1 [[TMP18]], [[TMP21]] +// CHECK1-NEXT: [[TMP29:%.*]] = or i1 [[TMP28]], [[TMP27]] +// CHECK1-NEXT: br i1 [[TMP29]], label [[THEN:%.*]], label [[ELSE:%.*]] // CHECK1: then: // CHECK1-NEXT: call void @"_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR6]] // CHECK1-NEXT: br label [[IFCONT:%.*]] // CHECK1: else: // CHECK1-NEXT: br label [[IFCONT]] // CHECK1: ifcont: -// CHECK1-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP8]], 1 -// CHECK1-NEXT: [[TMP39:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: [[TMP40:%.*]] = and i1 [[TMP38]], [[TMP39]] -// CHECK1-NEXT: br i1 [[TMP40]], label [[THEN4:%.*]], label [[ELSE5:%.*]] +// CHECK1-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 1 +// CHECK1-NEXT: [[TMP31:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]] +// CHECK1-NEXT: [[TMP32:%.*]] = and i1 [[TMP30]], [[TMP31]] +// CHECK1-NEXT: br i1 [[TMP32]], label [[THEN4:%.*]], label [[ELSE5:%.*]] // CHECK1: then4: -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP44]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP46]], ptr align 4 [[TMP43]], i64 8, i1 false), !tbaa.struct !21 +// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[TMP35]], align 8 +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP36]], ptr align 4 [[TMP34]], i64 8, i1 false), !tbaa.struct !22 // CHECK1-NEXT: br label [[IFCONT6:%.*]] // CHECK1: else5: // CHECK1-NEXT: br label [[IFCONT6]] @@ -456,45 +456,45 @@ // CHECK1-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31 // CHECK1-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block() // CHECK1-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5 -// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !noundef [[NOUNDEF14]] // CHECK1-NEXT: store i32 0, ptr [[DOTCNT_ADDR]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: br label [[PRECOND:%.*]] // CHECK1: precond: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCNT_ADDR]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP9:%.*]] = icmp ult i32 [[TMP8]], 2 -// CHECK1-NEXT: br i1 [[TMP9]], label [[BODY:%.*]], label [[EXIT:%.*]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCNT_ADDR]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP7]], 2 +// CHECK1-NEXT: br i1 [[TMP8]], label [[BODY:%.*]], label [[EXIT:%.*]] // CHECK1: body: // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4:[0-9]+]], i32 [[TMP2]]) // CHECK1-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0 // CHECK1-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]] // CHECK1: then: -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i64 0, i64 0 
-// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP11]], i32 [[TMP8]] -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]] -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP13]], align 4 -// CHECK1-NEXT: store volatile i32 [[TMP15]], ptr addrspace(3) [[TMP14]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8, !tbaa [[TBAA12]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP10]], i32 [[TMP7]] +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP11]], align 4, !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store volatile i32 [[TMP13]], ptr addrspace(3) [[TMP12]], align 4 // CHECK1-NEXT: br label [[IFCONT:%.*]] // CHECK1: else: // CHECK1-NEXT: br label [[IFCONT]] // CHECK1: ifcont: // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTADDR1]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP16]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTADDR1]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP14]] // CHECK1-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]] // CHECK1: then2: -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]] -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[TMP19]], i32 [[TMP8]] -// CHECK1-NEXT: [[TMP22:%.*]] = load volatile i32, ptr addrspace(3) [[TMP17]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: store i32 [[TMP22]], ptr [[TMP21]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]] +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !tbaa [[TBAA12]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP17]], i32 [[TMP7]] +// CHECK1-NEXT: [[TMP19:%.*]] = load volatile i32, ptr addrspace(3) [[TMP15]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store i32 [[TMP19]], ptr [[TMP18]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: br label [[IFCONT4:%.*]] // CHECK1: else3: // CHECK1-NEXT: br label [[IFCONT4]] // CHECK1: ifcont4: -// CHECK1-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK1-NEXT: store i32 [[TMP23]], ptr [[DOTCNT_ADDR]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP20:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK1-NEXT: store i32 [[TMP20]], ptr [[DOTCNT_ADDR]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: br label [[PRECOND]] // CHECK1: exit: // CHECK1-NEXT: ret void @@ -507,18 +507,18 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4 // CHECK1-NEXT: 
[[DOTZERO_ADDR:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store i16 [[TMP0]], ptr [[DOTADDR]], align 2, !tbaa [[TBAA19]] +// CHECK1-NEXT: store i16 [[TMP0]], ptr [[DOTADDR]], align 2, !tbaa [[TBAA20]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4 // CHECK1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]]) // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[GLOBAL_ARGS]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 0 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP3]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 1 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP6]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 2 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP9]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: call void @__omp_outlined__1(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]], ptr [[TMP5]], ptr [[TMP8]], ptr [[TMP11]]) #[[ATTR6]] +// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 8, !tbaa [[TBAA12]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 1 +// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TMP5]], align 8, !tbaa [[TBAA12]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 2 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 8, !tbaa [[TBAA12]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: call void @__omp_outlined__1(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]], ptr [[TMP4]], ptr [[TMP6]], ptr [[TMP8]]) #[[ATTR6]] // CHECK1-NEXT: ret void // // @@ -571,68 +571,68 @@ // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR6]] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[IB]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP7]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 99 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: 
cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP9]], [[COND_FALSE]] ] +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK1: omp.inner.for.cond.cleanup: // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP13]], 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[IB]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP]]) #[[ATTR6]] -// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP]], align 8, !tbaa [[TBAA22:![0-9]+]] +// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP]], align 8, !tbaa [[TBAA23:![0-9]+]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP2]]) #[[ATTR6]] -// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP2]], align 8, !tbaa [[TBAA22]] +// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP2]], align 8, !tbaa [[TBAA23]] // CHECK1-NEXT: call void @_ZNSt7complexIdEC1ERKdS2_(ptr nonnull align 8 dereferenceable(16) [[PARTIAL_SUM]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP2]]) #[[ATTR12]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP2]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[IB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP18]], 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[IB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP8]], 4 // CHECK1-NEXT: store i32 [[MUL3]], ptr [[ISTART]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[IB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP19]], 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[IB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[ADD4]], 
4 // CHECK1-NEXT: store i32 [[MUL5]], ptr [[IEND]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ISTART]], ptr [[TMP20]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[IEND]], ptr [[TMP22]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2 -// CHECK1-NEXT: store ptr [[PARTIAL_SUM]], ptr [[TMP24]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP7]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 3) +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ISTART]], ptr [[TMP10]], align 8, !tbaa [[TBAA12]] +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[IEND]], ptr [[TMP11]], align 8, !tbaa [[TBAA12]] +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2 +// CHECK1-NEXT: store ptr [[PARTIAL_SUM]], ptr [[TMP12]], align 8, !tbaa [[TBAA12]] +// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 3) // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP27]], 1 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP13]], 1 // CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: -// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP7]]) +// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP1]]) // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[IB]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR6]] @@ -697,99 +697,99 @@ // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[PARTIAL_SUM_ADDR]], align 8, !tbaa [[TBAA12]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IV]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_1]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP1]], align 
4, !tbaa [[TBAA8]] -// CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP1]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTCAPTURE_EXPR_2]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP9]], [[TMP10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: store i32 [[TMP12]], ptr [[I]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store i32 [[TMP7]], ptr [[I]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP14]], [[TMP15]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1: omp.precond.then: // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_LB]]) #[[ATTR6]] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_UB]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: store i32 [[TMP18]], ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_STRIDE]]) #[[ATTR6]] // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[DOTOMP_IS_LAST]]) #[[ATTR6]] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[PARTIAL_SUM5]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP]]) #[[ATTR6]] 
-// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP]], align 8, !tbaa [[TBAA22]] +// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP]], align 8, !tbaa [[TBAA23]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP6]]) #[[ATTR6]] -// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP6]], align 8, !tbaa [[TBAA22]] +// CHECK1-NEXT: store double 0.000000e+00, ptr [[REF_TMP6]], align 8, !tbaa [[TBAA23]] // CHECK1-NEXT: call void @_ZNSt7complexIdEC1ERKdS2_(ptr nonnull align 8 dereferenceable(16) [[PARTIAL_SUM5]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP6]]) #[[ATTR12]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP6]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 4, ptr [[I7]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB3]], i32 [[TMP28]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB3]], i32 [[TMP12]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK1: omp.dispatch.cond: -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[CMP8:%.*]] = icmp ugt i32 [[TMP29]], [[TMP30]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[CMP8:%.*]] = icmp ugt i32 [[TMP13]], [[TMP14]] // CHECK1-NEXT: br i1 [[CMP8]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP31]], [[COND_TRUE]] ], [ [[TMP32]], [[COND_FALSE]] ] +// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: store i32 [[TMP33]], ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: 
[[ADD9:%.*]] = add i32 [[TMP35]], 1 -// CHECK1-NEXT: [[CMP10:%.*]] = icmp ult i32 [[TMP34]], [[ADD9]] +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store i32 [[TMP17]], ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[ADD9:%.*]] = add i32 [[TMP19]], 1 +// CHECK1-NEXT: [[CMP10:%.*]] = icmp ult i32 [[TMP18]], [[ADD9]] // CHECK1-NEXT: br i1 [[CMP10]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_CLEANUP:%.*]] // CHECK1: omp.dispatch.cleanup: // CHECK1-NEXT: br label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[ADD11:%.*]] = add i32 [[TMP37]], 1 -// CHECK1-NEXT: [[CMP12:%.*]] = icmp ult i32 [[TMP36]], [[ADD11]] +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[ADD11:%.*]] = add i32 [[TMP21]], 1 +// CHECK1-NEXT: [[CMP12:%.*]] = icmp ult i32 [[TMP20]], [[ADD11]] // CHECK1-NEXT: br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK1: omp.inner.for.cond.cleanup: // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP39]], 1 -// CHECK1-NEXT: [[ADD13:%.*]] = add i32 [[TMP38]], [[MUL]] +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP23]], 1 +// CHECK1-NEXT: [[ADD13:%.*]] = add i32 [[TMP22]], [[MUL]] // CHECK1-NEXT: store i32 [[ADD13]], ptr [[I7]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr [[REF_TMP14]]) #[[ATTR6]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP15]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP42]] to double -// CHECK1-NEXT: store double [[CONV]], ptr [[REF_TMP15]], align 8, !tbaa [[TBAA22]] +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP24]] to double +// CHECK1-NEXT: store double [[CONV]], ptr [[REF_TMP15]], align 8, !tbaa [[TBAA23]] // CHECK1-NEXT: call void @llvm.lifetime.start.p0(i64 8, ptr [[REF_TMP16]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[I7]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[CONV17:%.*]] = sitofp i32 [[TMP44]] to double -// CHECK1-NEXT: store double [[CONV17]], ptr [[REF_TMP16]], align 8, !tbaa [[TBAA22]] +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[I7]], align 4, !tbaa 
[[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[CONV17:%.*]] = sitofp i32 [[TMP25]] to double +// CHECK1-NEXT: store double [[CONV17]], ptr [[REF_TMP16]], align 8, !tbaa [[TBAA23]] // CHECK1-NEXT: call void @_ZNSt7complexIdEC1ERKdS2_(ptr nonnull align 8 dereferenceable(16) [[REF_TMP14]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP15]], ptr nonnull align 8 dereferenceable(8) [[REF_TMP16]]) #[[ATTR12]] // CHECK1-NEXT: [[CALL:%.*]] = call nonnull align 8 dereferenceable(16) ptr @_ZNSt7complexIdEpLIdEERS0_RKS_IT_E(ptr nonnull align 8 dereferenceable(16) [[PARTIAL_SUM5]], ptr nonnull align 8 dereferenceable(16) [[REF_TMP14]]) #[[ATTR12]] // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 8, ptr [[REF_TMP16]]) #[[ATTR6]] @@ -799,36 +799,36 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[ADD18:%.*]] = add i32 [[TMP48]], 1 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[ADD18:%.*]] = add i32 [[TMP26]], 1 // CHECK1-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK1: omp.dispatch.inc: -// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[ADD19:%.*]] = add i32 [[TMP49]], [[TMP50]] +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[ADD19:%.*]] = add i32 [[TMP27]], [[TMP28]] // CHECK1-NEXT: store i32 [[ADD19]], ptr [[DOTOMP_LB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[ADD20:%.*]] = add i32 [[TMP51]], [[TMP52]] +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[ADD20:%.*]] = add i32 [[TMP29]], [[TMP30]] // CHECK1-NEXT: store i32 [[ADD20]], ptr [[DOTOMP_UB]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK1: omp.dispatch.end: -// CHECK1-NEXT: [[TMP53:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[TMP53]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP54]]) -// CHECK1-NEXT: [[TMP55:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[TMP55]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[PARTIAL_SUM5]], ptr [[TMP57]], align 8 -// CHECK1-NEXT: [[TMP60:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i32 [[TMP56]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func5, ptr @_omp_reduction_inter_warp_copy_func6) -// 
CHECK1-NEXT: [[TMP61:%.*]] = icmp eq i32 [[TMP60]], 1 -// CHECK1-NEXT: br i1 [[TMP61]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]] +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP32]]) +// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[PARTIAL_SUM5]], ptr [[TMP35]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i32 [[TMP34]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func5, ptr @_omp_reduction_inter_warp_copy_func6) +// CHECK1-NEXT: [[TMP37:%.*]] = icmp eq i32 [[TMP36]], 1 +// CHECK1-NEXT: br i1 [[TMP37]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]] // CHECK1: .omp.reduction.then: // CHECK1-NEXT: [[CALL21:%.*]] = call nonnull align 8 dereferenceable(16) ptr @_ZNSt7complexIdEpLIdEERS0_RKS_IT_E(ptr nonnull align 8 dereferenceable(16) [[TMP2]], ptr nonnull align 8 dereferenceable(16) [[PARTIAL_SUM5]]) #[[ATTR12]] -// CHECK1-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP56]]) +// CHECK1-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP34]]) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DONE]] // CHECK1: .omp.reduction.done: // CHECK1-NEXT: call void @llvm.lifetime.end.p0(i64 4, ptr [[I7]]) #[[ATTR6]] @@ -857,15 +857,15 @@ // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[__C_ADDR]], align 8, !tbaa [[TBAA12]] // CHECK1-NEXT: [[CALL:%.*]] = call double @_ZNKSt7complexIdE4realEv(ptr nonnull align 8 dereferenceable(16) [[TMP0]]) #[[ATTR12]] // CHECK1-NEXT: [[__RE_:%.*]] = getelementptr inbounds %"class.std::complex.0", ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP1:%.*]] = load double, ptr [[__RE_]], align 8, !tbaa [[TBAA24:![0-9]+]] +// CHECK1-NEXT: [[TMP1:%.*]] = load double, ptr [[__RE_]], align 8, !tbaa [[TBAA25:![0-9]+]], !noundef [[NOUNDEF14]] // CHECK1-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], [[CALL]] -// CHECK1-NEXT: store double [[ADD]], ptr [[__RE_]], align 8, !tbaa [[TBAA24]] +// CHECK1-NEXT: store double [[ADD]], ptr [[__RE_]], align 8, !tbaa [[TBAA25]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__C_ADDR]], align 8, !tbaa [[TBAA12]] // CHECK1-NEXT: [[CALL2:%.*]] = call double @_ZNKSt7complexIdE4imagEv(ptr nonnull align 8 dereferenceable(16) [[TMP2]]) #[[ATTR12]] // CHECK1-NEXT: [[__IM_:%.*]] = getelementptr inbounds %"class.std::complex.0", ptr [[THIS1]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP3:%.*]] = load double, ptr [[__IM_]], align 8, !tbaa [[TBAA26:![0-9]+]] +// CHECK1-NEXT: [[TMP3:%.*]] = load double, ptr [[__IM_]], align 8, !tbaa [[TBAA27:![0-9]+]], !noundef [[NOUNDEF14]] // CHECK1-NEXT: [[ADD3:%.*]] = fadd double [[TMP3]], [[CALL2]] -// CHECK1-NEXT: store double [[ADD3]], ptr [[__IM_]], align 8, !tbaa [[TBAA26]] +// CHECK1-NEXT: store double [[ADD3]], ptr [[__IM_]], align 8, !tbaa [[TBAA27]] // CHECK1-NEXT: ret ptr [[THIS1]] // // @@ -879,67 +879,67 @@ // CHECK1-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x ptr], align 8 // CHECK1-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca 
%"class.std::complex.0", align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2, !tbaa [[TBAA19]] -// CHECK1-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2, !tbaa [[TBAA19]] -// CHECK1-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2, !tbaa [[TBAA19]] -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR1]], align 2, !tbaa [[TBAA19]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR2]], align 2, !tbaa [[TBAA19]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[DOTADDR3]], align 2, !tbaa [[TBAA19]] -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr %"class.std::complex.0", ptr [[TMP11]], i64 1 +// CHECK1-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2, !tbaa [[TBAA20]] +// CHECK1-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2, !tbaa [[TBAA20]] +// CHECK1-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2, !tbaa [[TBAA20]] +// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !tbaa [[TBAA12]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2, !tbaa [[TBAA20]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2, !tbaa [[TBAA20]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2, !tbaa [[TBAA20]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr %"class.std::complex.0", ptr [[TMP9]], i64 1 // CHECK1-NEXT: br label [[DOTSHUFFLE_PRE_COND:%.*]] // CHECK1: .shuffle.pre_cond: -// CHECK1-NEXT: [[TMP17:%.*]] = phi ptr [ [[TMP11]], [[ENTRY:%.*]] ], [ [[TMP29:%.*]], [[DOTSHUFFLE_THEN:%.*]] ] -// CHECK1-NEXT: [[TMP18:%.*]] = phi ptr [ [[DOTOMP_REDUCTION_ELEMENT]], [[ENTRY]] ], [ [[TMP30:%.*]], [[DOTSHUFFLE_THEN]] ] -// CHECK1-NEXT: [[TMP20:%.*]] = ptrtoint ptr [[TMP13]] to i64 -// CHECK1-NEXT: [[TMP21:%.*]] = ptrtoint ptr [[TMP17]] to i64 -// CHECK1-NEXT: [[TMP22:%.*]] = sub i64 [[TMP20]], [[TMP21]] -// CHECK1-NEXT: [[TMP23:%.*]] = sdiv exact i64 [[TMP22]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP24:%.*]] = icmp sgt i64 [[TMP23]], 7 -// CHECK1-NEXT: br i1 [[TMP24]], label [[DOTSHUFFLE_THEN]], label [[DOTSHUFFLE_EXIT:%.*]] +// CHECK1-NEXT: [[TMP12:%.*]] = phi ptr [ [[TMP9]], [[ENTRY:%.*]] ], [ [[TMP23:%.*]], [[DOTSHUFFLE_THEN:%.*]] ] +// CHECK1-NEXT: [[TMP13:%.*]] = phi ptr [ [[DOTOMP_REDUCTION_ELEMENT]], [[ENTRY]] ], [ [[TMP24:%.*]], [[DOTSHUFFLE_THEN]] ] +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP11]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[TMP12]] to i64 +// CHECK1-NEXT: [[TMP16:%.*]] = sub i64 [[TMP14]], [[TMP15]] +// CHECK1-NEXT: [[TMP17:%.*]] = sdiv exact i64 [[TMP16]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP18:%.*]] = icmp sgt i64 [[TMP17]], 7 +// CHECK1-NEXT: br i1 [[TMP18]], label 
[[DOTSHUFFLE_THEN]], label [[DOTSHUFFLE_EXIT:%.*]] // CHECK1: .shuffle.then: -// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_get_warp_size() -// CHECK1-NEXT: [[TMP27:%.*]] = trunc i32 [[TMP26]] to i16 -// CHECK1-NEXT: [[TMP28:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP25]], i16 [[TMP7]], i16 [[TMP27]]) -// CHECK1-NEXT: store i64 [[TMP28]], ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP29]] = getelementptr i64, ptr [[TMP17]], i64 1 -// CHECK1-NEXT: [[TMP30]] = getelementptr i64, ptr [[TMP18]], i64 1 +// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_get_warp_size() +// CHECK1-NEXT: [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16 +// CHECK1-NEXT: [[TMP22:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP19]], i16 [[TMP6]], i16 [[TMP21]]) +// CHECK1-NEXT: store i64 [[TMP22]], ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP23]] = getelementptr i64, ptr [[TMP12]], i64 1 +// CHECK1-NEXT: [[TMP24]] = getelementptr i64, ptr [[TMP13]], i64 1 // CHECK1-NEXT: br label [[DOTSHUFFLE_PRE_COND]] // CHECK1: .shuffle.exit: -// CHECK1-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP12]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP32:%.*]] = icmp eq i16 [[TMP8]], 0 -// CHECK1-NEXT: [[TMP33:%.*]] = icmp eq i16 [[TMP8]], 1 -// CHECK1-NEXT: [[TMP34:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: [[TMP35:%.*]] = and i1 [[TMP33]], [[TMP34]] -// CHECK1-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP8]], 2 -// CHECK1-NEXT: [[TMP37:%.*]] = and i16 [[TMP6]], 1 -// CHECK1-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP37]], 0 -// CHECK1-NEXT: [[TMP39:%.*]] = and i1 [[TMP36]], [[TMP38]] -// CHECK1-NEXT: [[TMP40:%.*]] = icmp sgt i16 [[TMP7]], 0 -// CHECK1-NEXT: [[TMP41:%.*]] = and i1 [[TMP39]], [[TMP40]] -// CHECK1-NEXT: [[TMP42:%.*]] = or i1 [[TMP32]], [[TMP35]] -// CHECK1-NEXT: [[TMP43:%.*]] = or i1 [[TMP42]], [[TMP41]] -// CHECK1-NEXT: br i1 [[TMP43]], label [[THEN:%.*]], label [[ELSE:%.*]] +// CHECK1-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 8, !tbaa [[TBAA12]] +// CHECK1-NEXT: [[TMP25:%.*]] = icmp eq i16 [[TMP7]], 0 +// CHECK1-NEXT: [[TMP26:%.*]] = icmp eq i16 [[TMP7]], 1 +// CHECK1-NEXT: [[TMP27:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]] +// CHECK1-NEXT: [[TMP28:%.*]] = and i1 [[TMP26]], [[TMP27]] +// CHECK1-NEXT: [[TMP29:%.*]] = icmp eq i16 [[TMP7]], 2 +// CHECK1-NEXT: [[TMP30:%.*]] = and i16 [[TMP5]], 1 +// CHECK1-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP30]], 0 +// CHECK1-NEXT: [[TMP32:%.*]] = and i1 [[TMP29]], [[TMP31]] +// CHECK1-NEXT: [[TMP33:%.*]] = icmp sgt i16 [[TMP6]], 0 +// CHECK1-NEXT: [[TMP34:%.*]] = and i1 [[TMP32]], [[TMP33]] +// CHECK1-NEXT: [[TMP35:%.*]] = or i1 [[TMP25]], [[TMP28]] +// CHECK1-NEXT: [[TMP36:%.*]] = or i1 [[TMP35]], [[TMP34]] +// CHECK1-NEXT: br i1 [[TMP36]], label [[THEN:%.*]], label [[ELSE:%.*]] // CHECK1: then: // CHECK1-NEXT: call void @"_omp$reduction$reduction_func4"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR6]] // CHECK1-NEXT: br label [[IFCONT:%.*]] // CHECK1: else: // CHECK1-NEXT: br label [[IFCONT]] // CHECK1: ifcont: -// CHECK1-NEXT: [[TMP46:%.*]] = icmp eq i16 [[TMP8]], 1 -// CHECK1-NEXT: [[TMP47:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: [[TMP48:%.*]] = and i1 [[TMP46]], [[TMP47]] -// CHECK1-NEXT: br i1 [[TMP48]], label [[THEN4:%.*]], label [[ELSE5:%.*]] +// CHECK1-NEXT: [[TMP37:%.*]] = icmp eq i16 [[TMP7]], 1 +// CHECK1-NEXT: [[TMP38:%.*]] = icmp uge i16 
[[TMP5]], [[TMP6]] +// CHECK1-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]] +// CHECK1-NEXT: br i1 [[TMP39]], label [[THEN4:%.*]], label [[ELSE5:%.*]] // CHECK1: then4: -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP49]], align 8 -// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP54:%.*]] = load ptr, ptr [[TMP52]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP54]], ptr align 8 [[TMP51]], i64 16, i1 false), !tbaa.struct !27 +// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP41:%.*]] = load ptr, ptr [[TMP40]], align 8 +// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 8 +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP43]], ptr align 8 [[TMP41]], i64 16, i1 false), !tbaa.struct !28 // CHECK1-NEXT: br label [[IFCONT6:%.*]] // CHECK1: else5: // CHECK1-NEXT: br label [[IFCONT6]] @@ -961,45 +961,45 @@ // CHECK1-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31 // CHECK1-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block() // CHECK1-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5 -// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !noundef [[NOUNDEF14]] // CHECK1-NEXT: store i32 0, ptr [[DOTCNT_ADDR]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: br label [[PRECOND:%.*]] // CHECK1: precond: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCNT_ADDR]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[TMP9:%.*]] = icmp ult i32 [[TMP8]], 4 -// CHECK1-NEXT: br i1 [[TMP9]], label [[BODY:%.*]], label [[EXIT:%.*]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCNT_ADDR]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP7]], 4 +// CHECK1-NEXT: br i1 [[TMP8]], label [[BODY:%.*]], label [[EXIT:%.*]] // CHECK1: body: // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[TMP2]]) // CHECK1-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0 // CHECK1-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]] // CHECK1: then: -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP11]], i32 [[TMP8]] -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]] -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP13]], align 4 -// CHECK1-NEXT: store volatile i32 [[TMP15]], ptr addrspace(3) [[TMP14]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8, !tbaa [[TBAA12]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP10]], i32 [[TMP7]] +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP11]], align 4, 
!noundef [[NOUNDEF14]] +// CHECK1-NEXT: store volatile i32 [[TMP13]], ptr addrspace(3) [[TMP12]], align 4 // CHECK1-NEXT: br label [[IFCONT:%.*]] // CHECK1: else: // CHECK1-NEXT: br label [[IFCONT]] // CHECK1: ifcont: // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTADDR1]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP16]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTADDR1]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP14]] // CHECK1-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]] // CHECK1: then2: -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]] -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[TMP19]], i32 [[TMP8]] -// CHECK1-NEXT: [[TMP22:%.*]] = load volatile i32, ptr addrspace(3) [[TMP17]], align 4, !tbaa [[TBAA8]] -// CHECK1-NEXT: store i32 [[TMP22]], ptr [[TMP21]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]] +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !tbaa [[TBAA12]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP17]], i32 [[TMP7]] +// CHECK1-NEXT: [[TMP19:%.*]] = load volatile i32, ptr addrspace(3) [[TMP15]], align 4, !tbaa [[TBAA8]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store i32 [[TMP19]], ptr [[TMP18]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: br label [[IFCONT4:%.*]] // CHECK1: else3: // CHECK1-NEXT: br label [[IFCONT4]] // CHECK1: ifcont4: -// CHECK1-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK1-NEXT: store i32 [[TMP23]], ptr [[DOTCNT_ADDR]], align 4, !tbaa [[TBAA8]] +// CHECK1-NEXT: [[TMP20:%.*]] = add nsw i32 [[TMP7]], 1 +// CHECK1-NEXT: store i32 [[TMP20]], ptr [[DOTCNT_ADDR]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: br label [[PRECOND]] // CHECK1: exit: // CHECK1-NEXT: ret void @@ -1012,18 +1012,18 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8 -// CHECK1-NEXT: store i16 [[TMP0]], ptr [[DOTADDR]], align 2, !tbaa [[TBAA19]] +// CHECK1-NEXT: store i16 [[TMP0]], ptr [[DOTADDR]], align 2, !tbaa [[TBAA20]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4, !tbaa [[TBAA8]] // CHECK1-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4 // CHECK1-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]]) // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[GLOBAL_ARGS]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 0 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP3]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 1 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP6]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 2 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr 
[[TMP9]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]], ptr [[TMP5]], ptr [[TMP8]], ptr [[TMP11]]) #[[ATTR6]] +// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 8, !tbaa [[TBAA12]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 1 +// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TMP5]], align 8, !tbaa [[TBAA12]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 2 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 8, !tbaa [[TBAA12]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]], ptr [[TMP4]], ptr [[TMP6]], ptr [[TMP8]]) #[[ATTR6]] // CHECK1-NEXT: ret void // // @@ -1039,12 +1039,12 @@ // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: [[__RE_:%.*]] = getelementptr inbounds %"class.std::complex", ptr [[THIS1]], i32 0, i32 0 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[__RE_ADDR]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP1:%.*]] = load float, ptr [[TMP0]], align 4, !tbaa [[TBAA14]] -// CHECK1-NEXT: store float [[TMP1]], ptr [[__RE_]], align 4, !tbaa [[TBAA16]] +// CHECK1-NEXT: [[TMP1:%.*]] = load float, ptr [[TMP0]], align 4, !tbaa [[TBAA15]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store float [[TMP1]], ptr [[__RE_]], align 4, !tbaa [[TBAA17]] // CHECK1-NEXT: [[__IM_:%.*]] = getelementptr inbounds %"class.std::complex", ptr [[THIS1]], i32 0, i32 1 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__IM_ADDR]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP3:%.*]] = load float, ptr [[TMP2]], align 4, !tbaa [[TBAA14]] -// CHECK1-NEXT: store float [[TMP3]], ptr [[__IM_]], align 4, !tbaa [[TBAA18]] +// CHECK1-NEXT: [[TMP3:%.*]] = load float, ptr [[TMP2]], align 4, !tbaa [[TBAA15]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store float [[TMP3]], ptr [[__IM_]], align 4, !tbaa [[TBAA19]] // CHECK1-NEXT: ret void // // @@ -1055,7 +1055,7 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8, !tbaa [[TBAA12]] // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: [[__RE_:%.*]] = getelementptr inbounds %"class.std::complex", ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load float, ptr [[__RE_]], align 4, !tbaa [[TBAA16]] +// CHECK1-NEXT: [[TMP0:%.*]] = load float, ptr [[__RE_]], align 4, !tbaa [[TBAA17]], !noundef [[NOUNDEF14]] // CHECK1-NEXT: ret float [[TMP0]] // // @@ -1066,7 +1066,7 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8, !tbaa [[TBAA12]] // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: [[__IM_:%.*]] = getelementptr inbounds %"class.std::complex", ptr [[THIS1]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP0:%.*]] = load float, ptr [[__IM_]], align 4, !tbaa [[TBAA18]] +// CHECK1-NEXT: [[TMP0:%.*]] = load float, ptr [[__IM_]], align 4, !tbaa [[TBAA19]], !noundef [[NOUNDEF14]] // CHECK1-NEXT: ret float [[TMP0]] // // @@ -1082,12 +1082,12 @@ // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: [[__RE_:%.*]] = getelementptr inbounds %"class.std::complex.0", ptr [[THIS1]], i32 0, i32 0 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[__RE_ADDR]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP1:%.*]] = load double, ptr [[TMP0]], align 8, !tbaa [[TBAA22]] -// CHECK1-NEXT: store double [[TMP1]], ptr [[__RE_]], align 8, !tbaa [[TBAA24]] +// CHECK1-NEXT: [[TMP1:%.*]] = load 
double, ptr [[TMP0]], align 8, !tbaa [[TBAA23]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store double [[TMP1]], ptr [[__RE_]], align 8, !tbaa [[TBAA25]] // CHECK1-NEXT: [[__IM_:%.*]] = getelementptr inbounds %"class.std::complex.0", ptr [[THIS1]], i32 0, i32 1 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[__IM_ADDR]], align 8, !tbaa [[TBAA12]] -// CHECK1-NEXT: [[TMP3:%.*]] = load double, ptr [[TMP2]], align 8, !tbaa [[TBAA22]] -// CHECK1-NEXT: store double [[TMP3]], ptr [[__IM_]], align 8, !tbaa [[TBAA26]] +// CHECK1-NEXT: [[TMP3:%.*]] = load double, ptr [[TMP2]], align 8, !tbaa [[TBAA23]], !noundef [[NOUNDEF14]] +// CHECK1-NEXT: store double [[TMP3]], ptr [[__IM_]], align 8, !tbaa [[TBAA27]] // CHECK1-NEXT: ret void // // @@ -1098,7 +1098,7 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8, !tbaa [[TBAA12]] // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: [[__RE_:%.*]] = getelementptr inbounds %"class.std::complex.0", ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load double, ptr [[__RE_]], align 8, !tbaa [[TBAA24]] +// CHECK1-NEXT: [[TMP0:%.*]] = load double, ptr [[__RE_]], align 8, !tbaa [[TBAA25]], !noundef [[NOUNDEF14]] // CHECK1-NEXT: ret double [[TMP0]] // // @@ -1109,6 +1109,6 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8, !tbaa [[TBAA12]] // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: [[__IM_:%.*]] = getelementptr inbounds %"class.std::complex.0", ptr [[THIS1]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP0:%.*]] = load double, ptr [[__IM_]], align 8, !tbaa [[TBAA26]] +// CHECK1-NEXT: [[TMP0:%.*]] = load double, ptr [[__IM_]], align 8, !tbaa [[TBAA27]], !noundef [[NOUNDEF14]] // CHECK1-NEXT: ret double [[TMP0]] // diff --git a/clang/test/OpenMP/ordered_codegen.cpp b/clang/test/OpenMP/ordered_codegen.cpp --- a/clang/test/OpenMP/ordered_codegen.cpp +++ b/clang/test/OpenMP/ordered_codegen.cpp @@ -152,39 +152,39 @@ // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP5]], 7 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK1-NEXT: store i32 [[SUB]], ptr [[I]], align 4 // CHECK1-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]]) -// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr 
[[B_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP7]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP10]] to i64 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[IDXPROM1]] -// CHECK1-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]] -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP13]] to i64 // CHECK1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM4]] -// CHECK1-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX5]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP14]] -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP16]] to i64 // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM7]] // CHECK1-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4 @@ -193,7 +193,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: call void @__kmpc_dispatch_fini_4(ptr @[[GLOB1]], i32 [[TMP0]]) @@ -237,37 +237,37 @@ // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i64 [[TMP2]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr 
[[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = add i64 [[TMP4]], 1 // CHECK1-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP3]], [[ADD]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP5]], 127 // CHECK1-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]] // CHECK1-NEXT: store i64 [[ADD1]], ptr [[I]], align 8 // CHECK1-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]]) -// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[I]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP7]] -// CHECK1-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[I]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[TMP10]] -// CHECK1-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]] -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[I]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[TMP13]] -// CHECK1-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP14]] -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i64, ptr [[I]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[TMP16]] // CHECK1-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4 // CHECK1-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[TMP0]]) @@ -275,7 +275,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD7:%.*]] = add i64 [[TMP17]], 1 // CHECK1-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: call void 
@__kmpc_dispatch_fini_8u(ptr @[[GLOB1]], i32 [[TMP0]]) @@ -318,10 +318,10 @@ // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[X]], align 4 // CHECK1-NEXT: store i32 0, ptr [[Y]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[Y]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[Y]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[TMP1]] to i8 // CHECK1-NEXT: store i8 [[CONV]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32 // CHECK1-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]] // CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1 @@ -330,20 +330,20 @@ // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11 // CHECK1-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK1-NEXT: store i64 [[SUB5]], ptr [[DOTCAPTURE_EXPR_2]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i8 [[TMP3]], ptr [[I]], align 1 // CHECK1-NEXT: store i32 11, ptr [[X6]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV7:%.*]] = sext i8 [[TMP4]] to i32 // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV7]], 57 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1: omp.precond.then: // CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_UB]], align 8 // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_dispatch_init_8(ptr @[[GLOB1]], i32 [[TMP0]], i32 1073741894, i64 0, i64 [[TMP6]], i64 1, i64 1) // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK1: omp.dispatch.cond: @@ -351,25 +351,25 @@ // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP10:%.*]] = icmp sle i64 [[TMP9]], [[TMP10]] // CHECK1-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: 
omp.inner.for.body: -// CHECK1-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV11:%.*]] = sext i8 [[TMP11]] to i64 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[DIV12:%.*]] = sdiv i64 [[TMP12]], 11 // CHECK1-NEXT: [[MUL13:%.*]] = mul nsw i64 [[DIV12]], 1 // CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[CONV11]], [[MUL13]] // CHECK1-NEXT: [[CONV15:%.*]] = trunc i64 [[ADD14]] to i8 // CHECK1-NEXT: store i8 [[CONV15]], ptr [[I8]], align 1 -// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[DIV16:%.*]] = sdiv i64 [[TMP14]], 11 // CHECK1-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 11 // CHECK1-NEXT: [[SUB18:%.*]] = sub nsw i64 [[TMP13]], [[MUL17]] @@ -378,25 +378,25 @@ // CHECK1-NEXT: [[CONV21:%.*]] = trunc i64 [[SUB20]] to i32 // CHECK1-NEXT: store i32 [[CONV21]], ptr [[X9]], align 4 // CHECK1-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP16]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK1-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM22:%.*]] = sext i8 [[TMP19]] to i64 // CHECK1-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM22]] -// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX23]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX23]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL24:%.*]] = fmul float [[TMP17]], [[TMP20]] -// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM25:%.*]] = sext i8 [[TMP22]] to i64 // CHECK1-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM25]] -// CHECK1-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX26]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX26]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL27:%.*]] = fmul float [[MUL24]], [[TMP23]] -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load i8, 
ptr [[I8]], align 1 +// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP25:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM28:%.*]] = sext i8 [[TMP25]] to i64 // CHECK1-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 [[IDXPROM28]] // CHECK1-NEXT: store float [[MUL27]], ptr [[ARRAYIDX29]], align 4 @@ -405,7 +405,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD30:%.*]] = add nsw i64 [[TMP26]], 1 // CHECK1-NEXT: store i64 [[ADD30]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: call void @__kmpc_dispatch_fini_8(ptr @[[GLOB1]], i32 [[TMP0]]) @@ -455,23 +455,23 @@ // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP5]], 20 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8 // CHECK1-NEXT: store i8 [[CONV]], ptr [[I]], align 1 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP7]], 20 // CHECK1-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 20 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], [[MUL4]] @@ -479,24 +479,24 @@ // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 -10, [[MUL5]] // CHECK1-NEXT: store i32 [[ADD6]], ptr [[X2]], align 4 // CHECK1-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]]) -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[B_ADDR]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[I]], align 1 // CHECK1-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP9]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, 
ptr [[C_ADDR]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP12:%.*]] = load i8, ptr [[I]], align 1 // CHECK1-NEXT: [[IDXPROM7:%.*]] = zext i8 [[TMP12]] to i64 // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[IDXPROM7]] -// CHECK1-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX8]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX8]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL9:%.*]] = fmul float [[TMP10]], [[TMP13]] -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[D_ADDR]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP15:%.*]] = load i8, ptr [[I]], align 1 // CHECK1-NEXT: [[IDXPROM10:%.*]] = zext i8 [[TMP15]] to i64 // CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM10]] -// CHECK1-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX11]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX11]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL12:%.*]] = fmul float [[MUL9]], [[TMP16]] -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[I]], align 1 // CHECK1-NEXT: [[IDXPROM13:%.*]] = zext i8 [[TMP18]] to i64 // CHECK1-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM13]] @@ -506,7 +506,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK1-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: call void @__kmpc_dispatch_fini_4(ptr @[[GLOB1]], i32 [[TMP0]]) @@ -546,56 +546,56 @@ // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK1-NEXT: store i32 [[LOW]], ptr [[LOW_ADDR]], align 4 // CHECK1-NEXT: store i32 [[UP]], ptr [[UP_ADDR]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[LOW_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[LOW_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[UP_ADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[UP_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]] // CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK1: simd.if.then: // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD6:%.*]] = add i32 [[TMP9]], 1 // CHECK1-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP8]], [[ADD6]] // CHECK1-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !3 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP11]], 1 // CHECK1-NEXT: [[ADD8:%.*]] = add i32 [[TMP10]], [[MUL]] -// CHECK1-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group !3 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group !3 +// CHECK1-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]] -// CHECK1-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4, !llvm.access.group !3 -// CHECK1-NEXT: call void @__captured_stmt(ptr [[I5]]), !llvm.access.group !3 +// CHECK1-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK1-NEXT: call void @__captured_stmt(ptr [[I5]]), !llvm.access.group [[ACC_GRP4]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD9:%.*]] = add i32 [[TMP13]], 1 -// CHECK1-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK1-NEXT: br label 
[[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK1: omp.inner.for.end: -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB10:%.*]] = sub i32 [[TMP15]], [[TMP16]] // CHECK1-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1 // CHECK1-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], 1 @@ -605,31 +605,31 @@ // CHECK1-NEXT: store i32 [[ADD15]], ptr [[I5]], align 4 // CHECK1-NEXT: br label [[SIMD_IF_END]] // CHECK1: simd.if.end: -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[LOW_ADDR]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[LOW_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP17]], ptr [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[UP_ADDR]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[UP_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP18]], ptr [[DOTCAPTURE_EXPR_19]], align 4 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB21:%.*]] = sub i32 [[TMP19]], [[TMP20]] // CHECK1-NEXT: [[SUB22:%.*]] = sub i32 [[SUB21]], 1 // CHECK1-NEXT: [[ADD23:%.*]] = add i32 [[SUB22]], 1 // CHECK1-NEXT: [[DIV24:%.*]] = udiv i32 [[ADD23]], 1 // CHECK1-NEXT: [[SUB25:%.*]] = sub i32 [[DIV24]], 1 // CHECK1-NEXT: store i32 [[SUB25]], ptr [[DOTCAPTURE_EXPR_20]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP21]], ptr [[I26]], align 4 -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP27:%.*]] = icmp slt i32 [[TMP22]], [[TMP23]] // CHECK1-NEXT: br i1 [[CMP27]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1: omp.precond.then: // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4 +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP24]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_20]], align 4 +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_dispatch_init_4u(ptr @[[GLOB1]], i32 [[TMP0]], i32 66, i32 0, i32 [[TMP25]], i32 1, i32 1) // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK1: omp.dispatch.cond: @@ -637,47 +637,47 @@ // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP26]], 0 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_IV16]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] // CHECK1: omp.inner.for.cond29: -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD30:%.*]] = add i32 [[TMP29]], 1 // CHECK1-NEXT: [[CMP31:%.*]] = icmp ult i32 [[TMP28]], [[ADD30]] // CHECK1-NEXT: br i1 [[CMP31]], label [[OMP_INNER_FOR_BODY32:%.*]], label [[OMP_INNER_FOR_END40:%.*]] // CHECK1: omp.inner.for.body32: -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !llvm.access.group !7 -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL33:%.*]] = mul i32 [[TMP31]], 1 // CHECK1-NEXT: [[ADD34:%.*]] = add i32 [[TMP30]], [[MUL33]] -// CHECK1-NEXT: store i32 [[ADD34]], ptr [[I28]], align 4, !llvm.access.group !7 -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[I28]], align 4, !llvm.access.group !7 +// CHECK1-NEXT: store i32 [[ADD34]], ptr [[I28]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[I28]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM35:%.*]] = sext i32 [[TMP32]] to i64 // CHECK1-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM35]] -// CHECK1-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX36]], align 4, !llvm.access.group !7 -// CHECK1-NEXT: call void @__captured_stmt.1(ptr [[I28]]), !llvm.access.group !7 +// CHECK1-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX36]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK1-NEXT: call void @__captured_stmt.1(ptr [[I28]]), !llvm.access.group [[ACC_GRP8]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE37:%.*]] // CHECK1: omp.body.continue37: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC38:%.*]] // CHECK1: omp.inner.for.inc38: -// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD39:%.*]] = add i32 [[TMP33]], 1 -// CHECK1-NEXT: store i32 [[ADD39]], ptr 
[[DOTOMP_IV16]], align 4, !llvm.access.group !7 -// CHECK1-NEXT: call void @__kmpc_dispatch_fini_4u(ptr @[[GLOB1]], i32 [[TMP0]]), !llvm.access.group !7 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD39]], ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK1-NEXT: call void @__kmpc_dispatch_fini_4u(ptr @[[GLOB1]], i32 [[TMP0]]), !llvm.access.group [[ACC_GRP8]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK1: omp.inner.for.end40: // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK1: omp.dispatch.inc: // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK1: omp.dispatch.end: -// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 // CHECK1-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: -// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 -// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 +// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB41:%.*]] = sub i32 [[TMP37]], [[TMP38]] // CHECK1-NEXT: [[SUB42:%.*]] = sub i32 [[SUB41]], 1 // CHECK1-NEXT: [[ADD43:%.*]] = add i32 [[SUB42]], 1 @@ -699,7 +699,7 @@ // CHECK1-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]] // CHECK1-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4 @@ -712,7 +712,7 @@ // CHECK1-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]] // CHECK1-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4 @@ -750,40 +750,40 @@ // CHECK1-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 // CHECK1-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1-IRBUILDER: omp.dispatch.body: -// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-IRBUILDER-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1-IRBUILDER: omp.inner.for.cond: -// 
CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] // CHECK1-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1-IRBUILDER: omp.inner.for.body: -// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 7 // CHECK1-IRBUILDER-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK1-IRBUILDER-NEXT: store i32 [[SUB]], ptr [[I]], align 4 // CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK1-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2]]) -// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i64 [[IDXPROM]] -// CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[IDXPROM3:%.*]] = sext i32 [[TMP9]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[IDXPROM3]] -// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[MUL5:%.*]] = fmul float [[TMP7]], [[TMP10]] -// CHECK1-IRBUILDER-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP12]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[IDXPROM6]] -// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP13]] -// CHECK1-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load i32, 
ptr [[I]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP15]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM9]] // CHECK1-IRBUILDER-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4 @@ -794,7 +794,7 @@ // CHECK1-IRBUILDER: omp.body.continue: // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1-IRBUILDER: omp.inner.for.inc: -// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], 1 // CHECK1-IRBUILDER-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 // CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM11:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3]]) @@ -841,38 +841,38 @@ // CHECK1-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 // CHECK1-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1-IRBUILDER: omp.dispatch.body: -// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: store i64 [[TMP1]], ptr [[DOTOMP_IV]], align 8 // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1-IRBUILDER: omp.inner.for.cond: -// CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[ADD:%.*]] = add i64 [[TMP3]], 1 // CHECK1-IRBUILDER-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP2]], [[ADD]] // CHECK1-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1-IRBUILDER: omp.inner.for.body: -// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[MUL:%.*]] = mul i64 [[TMP4]], 127 // CHECK1-IRBUILDER-NEXT: [[ADD2:%.*]] = add i64 131071, [[MUL]] // CHECK1-IRBUILDER-NEXT: store i64 [[ADD2]], ptr [[I]], align 8 // CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK1-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3]]) -// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP6:%.*]] = load i64, ptr [[I]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP6:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i64 [[TMP6]] -// CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load i64, ptr [[I]], align 8 +// 
CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[TMP9]] -// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[MUL5:%.*]] = fmul float [[TMP7]], [[TMP10]] -// CHECK1-IRBUILDER-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load i64, ptr [[I]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[TMP12]] -// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX6]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[MUL7:%.*]] = fmul float [[MUL5]], [[TMP13]] -// CHECK1-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load i64, ptr [[I]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP15]] // CHECK1-IRBUILDER-NEXT: store float [[MUL7]], ptr [[ARRAYIDX8]], align 4 // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY_ORDERED_AFTER:%.*]] @@ -882,7 +882,7 @@ // CHECK1-IRBUILDER: omp.body.continue: // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1-IRBUILDER: omp.inner.for.inc: -// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[ADD9:%.*]] = add i64 [[TMP16]], 1 // CHECK1-IRBUILDER-NEXT: store i64 [[ADD9]], ptr [[DOTOMP_IV]], align 8 // CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM10:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB6]]) @@ -926,10 +926,10 @@ // CHECK1-IRBUILDER-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[X]], align 4 // CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[Y]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[CONV:%.*]] = trunc i32 [[TMP0]] to i8 // CHECK1-IRBUILDER-NEXT: store i8 [[CONV]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[CONV3:%.*]] = sext i8 [[TMP1]] to i32 // CHECK1-IRBUILDER-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]] // CHECK1-IRBUILDER-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1 @@ -938,20 +938,20 @@ // CHECK1-IRBUILDER-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11 // 
CHECK1-IRBUILDER-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK1-IRBUILDER-NEXT: store i64 [[SUB5]], ptr [[DOTCAPTURE_EXPR_2]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: store i8 [[TMP2]], ptr [[I]], align 1 // CHECK1-IRBUILDER-NEXT: store i32 11, ptr [[X6]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[CONV7:%.*]] = sext i8 [[TMP3]] to i32 // CHECK1-IRBUILDER-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV7]], 57 // CHECK1-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1-IRBUILDER: omp.precond.then: // CHECK1-IRBUILDER-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: store i64 [[TMP4]], ptr [[DOTOMP_UB]], align 8 // CHECK1-IRBUILDER-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB8:[0-9]+]]) // CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_8(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 1073741894, i64 0, i64 [[TMP5]], i64 1, i64 1) // CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND:%.*]] @@ -961,25 +961,25 @@ // CHECK1-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0 // CHECK1-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1-IRBUILDER: omp.dispatch.body: -// CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: store i64 [[TMP7]], ptr [[DOTOMP_IV]], align 8 // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1-IRBUILDER: omp.inner.for.cond: -// CHECK1-IRBUILDER-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[CMP11:%.*]] = icmp sle i64 [[TMP8]], [[TMP9]] // CHECK1-IRBUILDER-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1-IRBUILDER: omp.inner.for.body: -// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[CONV12:%.*]] = sext i8 [[TMP10]] to i64 -// CHECK1-IRBUILDER-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef 
[[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[DIV13:%.*]] = sdiv i64 [[TMP11]], 11 // CHECK1-IRBUILDER-NEXT: [[MUL14:%.*]] = mul nsw i64 [[DIV13]], 1 // CHECK1-IRBUILDER-NEXT: [[ADD15:%.*]] = add nsw i64 [[CONV12]], [[MUL14]] // CHECK1-IRBUILDER-NEXT: [[CONV16:%.*]] = trunc i64 [[ADD15]] to i8 // CHECK1-IRBUILDER-NEXT: store i8 [[CONV16]], ptr [[I8]], align 1 -// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[DIV17:%.*]] = sdiv i64 [[TMP13]], 11 // CHECK1-IRBUILDER-NEXT: [[MUL18:%.*]] = mul nsw i64 [[DIV17]], 11 // CHECK1-IRBUILDER-NEXT: [[SUB19:%.*]] = sub nsw i64 [[TMP12]], [[MUL18]] @@ -989,25 +989,25 @@ // CHECK1-IRBUILDER-NEXT: store i32 [[CONV22]], ptr [[X9]], align 4 // CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM23:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK1-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM23]]) -// CHECK1-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK1-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP15]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM]] -// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP17:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP18:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP17:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP18:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[IDXPROM24:%.*]] = sext i8 [[TMP18]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM24]] -// CHECK1-IRBUILDER-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX25]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX25]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[MUL26:%.*]] = fmul float [[TMP16]], [[TMP19]] -// CHECK1-IRBUILDER-NEXT: [[TMP20:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP21:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK1-IRBUILDER-NEXT: [[TMP20:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP21:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[IDXPROM27:%.*]] = sext i8 [[TMP21]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM27]] -// CHECK1-IRBUILDER-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX28]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX28]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[MUL29:%.*]] = fmul float [[MUL26]], [[TMP22]] -// CHECK1-IRBUILDER-NEXT: [[TMP23:%.*]] 
= load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP24:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK1-IRBUILDER-NEXT: [[TMP23:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP24:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[IDXPROM30:%.*]] = sext i8 [[TMP24]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM30]] // CHECK1-IRBUILDER-NEXT: store float [[MUL29]], ptr [[ARRAYIDX31]], align 4 @@ -1018,7 +1018,7 @@ // CHECK1-IRBUILDER: omp.body.continue: // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1-IRBUILDER: omp.inner.for.inc: -// CHECK1-IRBUILDER-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[ADD32:%.*]] = add nsw i64 [[TMP25]], 1 // CHECK1-IRBUILDER-NEXT: store i64 [[ADD32]], ptr [[DOTOMP_IV]], align 8 // CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM33:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB8]]) @@ -1071,23 +1071,23 @@ // CHECK1-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 // CHECK1-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1-IRBUILDER: omp.dispatch.body: -// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1-IRBUILDER: omp.inner.for.cond: -// CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] // CHECK1-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1-IRBUILDER: omp.inner.for.body: -// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP4]], 20 // CHECK1-IRBUILDER-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK1-IRBUILDER-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]] // CHECK1-IRBUILDER-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8 // CHECK1-IRBUILDER-NEXT: store i8 [[CONV]], ptr [[I]], align 1 -// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP6]], 20 // CHECK1-IRBUILDER-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 20 // CHECK1-IRBUILDER-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], [[MUL5]] @@ -1096,24 +1096,24 @@ // CHECK1-IRBUILDER-NEXT: store i32 [[ADD7]], ptr [[X2]], align 4 // CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM8:%.*]] = call 
i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK1-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM8]]) -// CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load ptr, ptr [[B_ADDR]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[TMP8:%.*]] = load i8, ptr [[I]], align 1 // CHECK1-IRBUILDER-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP8]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 [[IDXPROM]] -// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load ptr, ptr [[C_ADDR]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[TMP11:%.*]] = load i8, ptr [[I]], align 1 // CHECK1-IRBUILDER-NEXT: [[IDXPROM9:%.*]] = zext i8 [[TMP11]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[IDXPROM9]] -// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[MUL11:%.*]] = fmul float [[TMP9]], [[TMP12]] -// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load ptr, ptr [[D_ADDR]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[TMP14:%.*]] = load i8, ptr [[I]], align 1 // CHECK1-IRBUILDER-NEXT: [[IDXPROM12:%.*]] = zext i8 [[TMP14]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM12]] -// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX13]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX13]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[MUL14:%.*]] = fmul float [[MUL11]], [[TMP15]] -// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load ptr, ptr [[A_ADDR]], align 8 +// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[TMP17:%.*]] = load i8, ptr [[I]], align 1 // CHECK1-IRBUILDER-NEXT: [[IDXPROM15:%.*]] = zext i8 [[TMP17]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[IDXPROM15]] @@ -1125,7 +1125,7 @@ // CHECK1-IRBUILDER: omp.body.continue: // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1-IRBUILDER: omp.inner.for.inc: -// CHECK1-IRBUILDER-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP18]], 1 // CHECK1-IRBUILDER-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4 // CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM18:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB10]]) @@ -1166,58 +1166,58 @@ // CHECK1-IRBUILDER-NEXT: [[I28:%.*]] = alloca i32, align 4 // CHECK1-IRBUILDER-NEXT: store i32 [[LOW]], ptr [[LOW_ADDR]], align 4 // CHECK1-IRBUILDER-NEXT: store i32 [[UP]], ptr [[UP_ADDR]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = load i32, ptr [[LOW_ADDR]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = load i32, ptr [[LOW_ADDR]], align 4, !noundef [[NOUNDEF3]] 
// CHECK1-IRBUILDER-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[UP_ADDR]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[UP_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]] // CHECK1-IRBUILDER-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK1-IRBUILDER-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK1-IRBUILDER-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK1-IRBUILDER-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK1-IRBUILDER-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: store i32 [[TMP4]], ptr [[I]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP5]], [[TMP6]] // CHECK1-IRBUILDER-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK1-IRBUILDER: simd.if.then: // CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4 // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1-IRBUILDER: omp.inner.for.cond: -// CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK1-IRBUILDER-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK1-IRBUILDER-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[ADD6:%.*]] = add i32 [[TMP8]], 1 // CHECK1-IRBUILDER-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP7]], [[ADD6]] // CHECK1-IRBUILDER-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1-IRBUILDER: omp.inner.for.body: -// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !3 -// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK1-IRBUILDER-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[MUL:%.*]] = mul i32 [[TMP10]], 1 // CHECK1-IRBUILDER-NEXT: [[ADD8:%.*]] = add i32 [[TMP9]], [[MUL]] -// 
CHECK1-IRBUILDER-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group !3 -// CHECK1-IRBUILDER-NEXT: [[TMP11:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group !3 +// CHECK1-IRBUILDER-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK1-IRBUILDER-NEXT: [[TMP11:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]] -// CHECK1-IRBUILDER-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4, !llvm.access.group !3 -// CHECK1-IRBUILDER-NEXT: call void @__captured_stmt(ptr [[I5]]), !llvm.access.group !3 +// CHECK1-IRBUILDER-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK1-IRBUILDER-NEXT: call void @__captured_stmt(ptr [[I5]]), !llvm.access.group [[ACC_GRP4]] // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY_ORDERED_AFTER:%.*]] // CHECK1-IRBUILDER: omp.inner.for.body.ordered.after: // CHECK1-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1-IRBUILDER: omp.body.continue: // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1-IRBUILDER: omp.inner.for.inc: -// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK1-IRBUILDER-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[ADD9:%.*]] = add i32 [[TMP12]], 1 -// CHECK1-IRBUILDER-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK1-IRBUILDER-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK1-IRBUILDER: omp.inner.for.end: -// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[SUB10:%.*]] = sub i32 [[TMP14]], [[TMP15]] // CHECK1-IRBUILDER-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1 // CHECK1-IRBUILDER-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], 1 @@ -1227,31 +1227,31 @@ // CHECK1-IRBUILDER-NEXT: store i32 [[ADD15]], ptr [[I5]], align 4 // CHECK1-IRBUILDER-NEXT: br label [[SIMD_IF_END]] // CHECK1-IRBUILDER: simd.if.end: -// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load i32, ptr [[LOW_ADDR]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP16:%.*]] = load i32, ptr [[LOW_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: store i32 [[TMP16]], ptr [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP17:%.*]] = load i32, ptr [[UP_ADDR]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP17:%.*]] = load i32, ptr [[UP_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: store i32 [[TMP17]], ptr [[DOTCAPTURE_EXPR_19]], align 4 -// 
CHECK1-IRBUILDER-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[SUB21:%.*]] = sub i32 [[TMP18]], [[TMP19]] // CHECK1-IRBUILDER-NEXT: [[SUB22:%.*]] = sub i32 [[SUB21]], 1 // CHECK1-IRBUILDER-NEXT: [[ADD23:%.*]] = add i32 [[SUB22]], 1 // CHECK1-IRBUILDER-NEXT: [[DIV24:%.*]] = udiv i32 [[ADD23]], 1 // CHECK1-IRBUILDER-NEXT: [[SUB25:%.*]] = sub i32 [[DIV24]], 1 // CHECK1-IRBUILDER-NEXT: store i32 [[SUB25]], ptr [[DOTCAPTURE_EXPR_20]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: store i32 [[TMP20]], ptr [[I26]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[CMP27:%.*]] = icmp slt i32 [[TMP21]], [[TMP22]] // CHECK1-IRBUILDER-NEXT: br i1 [[CMP27]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1-IRBUILDER: omp.precond.then: // CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: store i32 [[TMP23]], ptr [[DOTOMP_UB]], align 4 // CHECK1-IRBUILDER-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB12:[0-9]+]]) // CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_4u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 66, i32 0, i32 [[TMP24]], i32 1, i32 1) // CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND:%.*]] @@ -1261,50 +1261,50 @@ // CHECK1-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK1-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1-IRBUILDER: omp.dispatch.body: -// CHECK1-IRBUILDER-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: store i32 [[TMP26]], ptr [[DOTOMP_IV16]], align 4 // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND30:%.*]] // CHECK1-IRBUILDER: omp.inner.for.cond30: -// CHECK1-IRBUILDER-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 -// CHECK1-IRBUILDER-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 +// CHECK1-IRBUILDER-NEXT: 
[[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[ADD31:%.*]] = add i32 [[TMP28]], 1 // CHECK1-IRBUILDER-NEXT: [[CMP32:%.*]] = icmp ult i32 [[TMP27]], [[ADD31]] // CHECK1-IRBUILDER-NEXT: br i1 [[CMP32]], label [[OMP_INNER_FOR_BODY33:%.*]], label [[OMP_INNER_FOR_END42:%.*]] // CHECK1-IRBUILDER: omp.inner.for.body33: -// CHECK1-IRBUILDER-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !llvm.access.group !7 -// CHECK1-IRBUILDER-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 +// CHECK1-IRBUILDER-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[MUL34:%.*]] = mul i32 [[TMP30]], 1 // CHECK1-IRBUILDER-NEXT: [[ADD35:%.*]] = add i32 [[TMP29]], [[MUL34]] -// CHECK1-IRBUILDER-NEXT: store i32 [[ADD35]], ptr [[I28]], align 4, !llvm.access.group !7 -// CHECK1-IRBUILDER-NEXT: [[TMP31:%.*]] = load i32, ptr [[I28]], align 4, !llvm.access.group !7 +// CHECK1-IRBUILDER-NEXT: store i32 [[ADD35]], ptr [[I28]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK1-IRBUILDER-NEXT: [[TMP31:%.*]] = load i32, ptr [[I28]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[IDXPROM36:%.*]] = sext i32 [[TMP31]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM36]] -// CHECK1-IRBUILDER-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX37]], align 4, !llvm.access.group !7 -// CHECK1-IRBUILDER-NEXT: call void @__captured_stmt.1(ptr [[I28]]), !llvm.access.group !7 +// CHECK1-IRBUILDER-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX37]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK1-IRBUILDER-NEXT: call void @__captured_stmt.1(ptr [[I28]]), !llvm.access.group [[ACC_GRP8]] // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY33_ORDERED_AFTER:%.*]] // CHECK1-IRBUILDER: omp.inner.for.body33.ordered.after: // CHECK1-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE38:%.*]] // CHECK1-IRBUILDER: omp.body.continue38: // CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC39:%.*]] // CHECK1-IRBUILDER: omp.inner.for.inc39: -// CHECK1-IRBUILDER-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 +// CHECK1-IRBUILDER-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[ADD40:%.*]] = add i32 [[TMP32]], 1 -// CHECK1-IRBUILDER-NEXT: store i32 [[ADD40]], ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 +// CHECK1-IRBUILDER-NEXT: store i32 [[ADD40]], ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK1-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM41:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB12]]) -// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_fini_4u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM41]]), !llvm.access.group !7 -// CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND30]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK1-IRBUILDER-NEXT: call void @__kmpc_dispatch_fini_4u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM41]]), !llvm.access.group [[ACC_GRP8]] +// 
CHECK1-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND30]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK1-IRBUILDER: omp.inner.for.end42: // CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK1-IRBUILDER: omp.dispatch.inc: // CHECK1-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK1-IRBUILDER: omp.dispatch.end: -// CHECK1-IRBUILDER-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK1-IRBUILDER-NEXT: br i1 [[TMP34]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1-IRBUILDER: .omp.final.then: -// CHECK1-IRBUILDER-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 -// CHECK1-IRBUILDER-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-IRBUILDER-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[SUB43:%.*]] = sub i32 [[TMP36]], [[TMP37]] // CHECK1-IRBUILDER-NEXT: [[SUB44:%.*]] = sub i32 [[SUB43]], 1 // CHECK1-IRBUILDER-NEXT: [[ADD45:%.*]] = add i32 [[SUB44]], 1 @@ -1327,7 +1327,7 @@ // CHECK1-IRBUILDER-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-IRBUILDER-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8 // CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]] // CHECK1-IRBUILDER-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4 @@ -1340,7 +1340,7 @@ // CHECK1-IRBUILDER-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-IRBUILDER-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8 // CHECK1-IRBUILDER-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 -// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64 // CHECK1-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]] // CHECK1-IRBUILDER-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4 @@ -1377,39 +1377,39 @@ // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP5]], 7 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK3-NEXT: store i32 [[SUB]], ptr [[I]], align 4 // CHECK3-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]]) -// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP7]] to i64 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[IDXPROM]] -// CHECK3-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP10]] to i64 // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[IDXPROM1]] -// CHECK3-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]] -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP13]] to i64 // CHECK3-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM4]] -// CHECK3-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX5]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[MUL6:%.*]] = fmul float [[MUL3]], [[TMP14]] -// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP16]] to i64 // CHECK3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM7]] // CHECK3-NEXT: store float [[MUL6]], ptr [[ARRAYIDX8]], align 4 @@ -1418,7 +1418,7 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // 
CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: call void @__kmpc_dispatch_fini_4(ptr @[[GLOB1]], i32 [[TMP0]]) @@ -1462,37 +1462,37 @@ // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: -// CHECK3-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK3-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i64 [[TMP2]], ptr [[DOTOMP_IV]], align 8 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK3-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD:%.*]] = add i64 [[TMP4]], 1 // CHECK3-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP3]], [[ADD]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP5]], 127 // CHECK3-NEXT: [[ADD1:%.*]] = add i64 131071, [[MUL]] // CHECK3-NEXT: store i64 [[ADD1]], ptr [[I]], align 8 // CHECK3-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]]) -// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[I]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[TMP7]] -// CHECK3-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[I]], align 8 +// CHECK3-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[TMP10]] -// CHECK3-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[MUL3:%.*]] = fmul float [[TMP8]], [[TMP11]] -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[I]], align 8 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[TMP13]] -// CHECK3-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[MUL5:%.*]] = fmul float [[MUL3]], [[TMP14]] -// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// 
CHECK3-NEXT: [[TMP16:%.*]] = load i64, ptr [[I]], align 8 +// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[TMP16]] // CHECK3-NEXT: store float [[MUL5]], ptr [[ARRAYIDX6]], align 4 // CHECK3-NEXT: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[TMP0]]) @@ -1500,7 +1500,7 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD7:%.*]] = add i64 [[TMP17]], 1 // CHECK3-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8 // CHECK3-NEXT: call void @__kmpc_dispatch_fini_8u(ptr @[[GLOB1]], i32 [[TMP0]]) @@ -1543,10 +1543,10 @@ // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK3-NEXT: store i32 0, ptr [[X]], align 4 // CHECK3-NEXT: store i32 0, ptr [[Y]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[Y]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[Y]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[TMP1]] to i8 // CHECK3-NEXT: store i8 [[CONV]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV3:%.*]] = sext i8 [[TMP2]] to i32 // CHECK3-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]] // CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1 @@ -1555,20 +1555,20 @@ // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11 // CHECK3-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK3-NEXT: store i64 [[SUB5]], ptr [[DOTCAPTURE_EXPR_2]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i8 [[TMP3]], ptr [[I]], align 1 // CHECK3-NEXT: store i32 11, ptr [[X6]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV7:%.*]] = sext i8 [[TMP4]] to i32 // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV7]], 57 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK3: omp.precond.then: // CHECK3-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8 +// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_UB]], align 8 // CHECK3-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @__kmpc_dispatch_init_8(ptr @[[GLOB1]], i32 [[TMP0]], i32 70, i64 0, i64 [[TMP6]], i64 1, i64 1) // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK3: omp.dispatch.cond: @@ -1576,25 +1576,25 @@ // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK3-NEXT: br i1 [[TOBOOL]], label 
[[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP10:%.*]] = icmp sle i64 [[TMP9]], [[TMP10]] // CHECK3-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV11:%.*]] = sext i8 [[TMP11]] to i64 -// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[DIV12:%.*]] = sdiv i64 [[TMP12]], 11 // CHECK3-NEXT: [[MUL13:%.*]] = mul nsw i64 [[DIV12]], 1 // CHECK3-NEXT: [[ADD14:%.*]] = add nsw i64 [[CONV11]], [[MUL13]] // CHECK3-NEXT: [[CONV15:%.*]] = trunc i64 [[ADD14]] to i8 // CHECK3-NEXT: store i8 [[CONV15]], ptr [[I8]], align 1 -// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[DIV16:%.*]] = sdiv i64 [[TMP14]], 11 // CHECK3-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 11 // CHECK3-NEXT: [[SUB18:%.*]] = sub nsw i64 [[TMP13]], [[MUL17]] @@ -1603,25 +1603,25 @@ // CHECK3-NEXT: [[CONV21:%.*]] = trunc i64 [[SUB20]] to i32 // CHECK3-NEXT: store i32 [[CONV21]], ptr [[X9]], align 4 // CHECK3-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]]) -// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK3-NEXT: [[TMP16:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP16]] to i64 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[IDXPROM]] -// CHECK3-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK3-NEXT: [[TMP19:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK3-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM22:%.*]] = sext i8 [[TMP19]] to i64 // CHECK3-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[IDXPROM22]] -// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX23]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX23]], align 
4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[MUL24:%.*]] = fmul float [[TMP17]], [[TMP20]] -// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK3-NEXT: [[TMP22:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP22:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM25:%.*]] = sext i8 [[TMP22]] to i64 // CHECK3-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[IDXPROM25]] -// CHECK3-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX26]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX26]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[MUL27:%.*]] = fmul float [[MUL24]], [[TMP23]] -// CHECK3-NEXT: [[TMP24:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK3-NEXT: [[TMP25:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK3-NEXT: [[TMP24:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP25:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM28:%.*]] = sext i8 [[TMP25]] to i64 // CHECK3-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 [[IDXPROM28]] // CHECK3-NEXT: store float [[MUL27]], ptr [[ARRAYIDX29]], align 4 @@ -1630,7 +1630,7 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD30:%.*]] = add nsw i64 [[TMP26]], 1 // CHECK3-NEXT: store i64 [[ADD30]], ptr [[DOTOMP_IV]], align 8 // CHECK3-NEXT: call void @__kmpc_dispatch_fini_8(ptr @[[GLOB1]], i32 [[TMP0]]) @@ -1680,23 +1680,23 @@ // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP5]], 20 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8 // CHECK3-NEXT: store i8 [[CONV]], ptr [[I]], align 1 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP7]], 20 // CHECK3-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 20 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], [[MUL4]] @@ -1704,24 +1704,24 @@ // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 -10, [[MUL5]] // CHECK3-NEXT: store i32 [[ADD6]], ptr [[X2]], align 4 // CHECK3-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[TMP0]]) -// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[B_ADDR]], align 8 +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP9:%.*]] = load i8, ptr [[I]], align 1 // CHECK3-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP9]] to i64 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[IDXPROM]] -// CHECK3-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[C_ADDR]], align 8 +// CHECK3-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP12:%.*]] = load i8, ptr [[I]], align 1 // CHECK3-NEXT: [[IDXPROM7:%.*]] = zext i8 [[TMP12]] to i64 // CHECK3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[IDXPROM7]] -// CHECK3-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX8]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX8]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[MUL9:%.*]] = fmul float [[TMP10]], [[TMP13]] -// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[D_ADDR]], align 8 +// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP15:%.*]] = load i8, ptr [[I]], align 1 // CHECK3-NEXT: [[IDXPROM10:%.*]] = zext i8 [[TMP15]] to i64 // CHECK3-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM10]] -// CHECK3-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX11]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX11]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[MUL12:%.*]] = fmul float [[MUL9]], [[TMP16]] -// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[A_ADDR]], align 8 +// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP18:%.*]] = load i8, ptr [[I]], align 1 // CHECK3-NEXT: [[IDXPROM13:%.*]] = zext i8 [[TMP18]] to i64 // CHECK3-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM13]] @@ -1731,7 +1731,7 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK3-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: call void @__kmpc_dispatch_fini_4(ptr @[[GLOB1]], i32 [[TMP0]]) @@ -1771,56 +1771,56 @@ // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK3-NEXT: store i32 [[LOW]], ptr [[LOW_ADDR]], align 4 // CHECK3-NEXT: store i32 [[UP]], ptr [[UP_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[LOW_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[LOW_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr 
[[UP_ADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[UP_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]] // CHECK3-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK3-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK3-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK3: simd.if.then: // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD6:%.*]] = add i32 [[TMP9]], 1 // CHECK3-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP8]], [[ADD6]] // CHECK3-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !3 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[MUL:%.*]] = mul i32 [[TMP11]], 1 // CHECK3-NEXT: [[ADD8:%.*]] = add i32 [[TMP10]], [[MUL]] -// CHECK3-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group !3 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group !3 +// CHECK3-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]] 
-// CHECK3-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4, !llvm.access.group !3 -// CHECK3-NEXT: call void @__captured_stmt(ptr [[I5]]), !llvm.access.group !3 +// CHECK3-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK3-NEXT: call void @__captured_stmt(ptr [[I5]]), !llvm.access.group [[ACC_GRP4]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD9:%.*]] = add i32 [[TMP13]], 1 -// CHECK3-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK3: omp.inner.for.end: -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB10:%.*]] = sub i32 [[TMP15]], [[TMP16]] // CHECK3-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1 // CHECK3-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], 1 @@ -1830,31 +1830,31 @@ // CHECK3-NEXT: store i32 [[ADD15]], ptr [[I5]], align 4 // CHECK3-NEXT: br label [[SIMD_IF_END]] // CHECK3: simd.if.end: -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[LOW_ADDR]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[LOW_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP17]], ptr [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[UP_ADDR]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[UP_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP18]], ptr [[DOTCAPTURE_EXPR_19]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB21:%.*]] = sub i32 [[TMP19]], [[TMP20]] // CHECK3-NEXT: [[SUB22:%.*]] = sub i32 [[SUB21]], 1 // CHECK3-NEXT: [[ADD23:%.*]] = add i32 [[SUB22]], 1 // CHECK3-NEXT: [[DIV24:%.*]] = udiv i32 [[ADD23]], 1 // CHECK3-NEXT: [[SUB25:%.*]] = sub i32 [[DIV24]], 1 // CHECK3-NEXT: store i32 [[SUB25]], ptr [[DOTCAPTURE_EXPR_20]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP21]], ptr [[I26]], align 4 -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_19]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP27:%.*]] = icmp slt i32 [[TMP22]], [[TMP23]] // CHECK3-NEXT: br i1 [[CMP27]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK3: omp.precond.then: // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4 +// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP24]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @__kmpc_dispatch_init_4u(ptr @[[GLOB1]], i32 [[TMP0]], i32 66, i32 0, i32 [[TMP25]], i32 1, i32 1) // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK3: omp.dispatch.cond: @@ -1862,47 +1862,47 @@ // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP26]], 0 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_IV16]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] // CHECK3: omp.inner.for.cond29: -// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD30:%.*]] = add i32 [[TMP29]], 1 // CHECK3-NEXT: [[CMP31:%.*]] = icmp ult i32 [[TMP28]], [[ADD30]] // CHECK3-NEXT: br i1 [[CMP31]], label [[OMP_INNER_FOR_BODY32:%.*]], label [[OMP_INNER_FOR_END40:%.*]] // CHECK3: omp.inner.for.body32: -// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !llvm.access.group !7 -// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[MUL33:%.*]] = mul i32 [[TMP31]], 1 // CHECK3-NEXT: [[ADD34:%.*]] = add i32 [[TMP30]], [[MUL33]] -// CHECK3-NEXT: store i32 [[ADD34]], ptr [[I28]], align 4, !llvm.access.group !7 -// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[I28]], align 4, !llvm.access.group !7 +// CHECK3-NEXT: store i32 [[ADD34]], ptr [[I28]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[I28]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM35:%.*]] = sext i32 [[TMP32]] to i64 // CHECK3-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds [10 x 
float], ptr @f, i64 0, i64 [[IDXPROM35]] -// CHECK3-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX36]], align 4, !llvm.access.group !7 -// CHECK3-NEXT: call void @__captured_stmt.1(ptr [[I28]]), !llvm.access.group !7 +// CHECK3-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX36]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK3-NEXT: call void @__captured_stmt.1(ptr [[I28]]), !llvm.access.group [[ACC_GRP8]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE37:%.*]] // CHECK3: omp.body.continue37: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC38:%.*]] // CHECK3: omp.inner.for.inc38: -// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 +// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD39:%.*]] = add i32 [[TMP33]], 1 -// CHECK3-NEXT: store i32 [[ADD39]], ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 -// CHECK3-NEXT: call void @__kmpc_dispatch_fini_4u(ptr @[[GLOB1]], i32 [[TMP0]]), !llvm.access.group !7 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD39]], ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK3-NEXT: call void @__kmpc_dispatch_fini_4u(ptr @[[GLOB1]], i32 [[TMP0]]), !llvm.access.group [[ACC_GRP8]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK3: omp.inner.for.end40: // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK3: omp.dispatch.inc: // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK3: omp.dispatch.end: -// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 // CHECK3-NEXT: br i1 [[TMP35]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: -// CHECK3-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK3-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 -// CHECK3-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 +// CHECK3-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB41:%.*]] = sub i32 [[TMP37]], [[TMP38]] // CHECK3-NEXT: [[SUB42:%.*]] = sub i32 [[SUB41]], 1 // CHECK3-NEXT: [[ADD43:%.*]] = add i32 [[SUB42]], 1 @@ -1924,7 +1924,7 @@ // CHECK3-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]] // CHECK3-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4 @@ -1937,7 +1937,7 @@ // CHECK3-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = 
load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]] // CHECK3-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4 @@ -1975,40 +1975,40 @@ // CHECK3-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 // CHECK3-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3-IRBUILDER: omp.dispatch.body: -// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK3-IRBUILDER-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 // CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3-IRBUILDER: omp.inner.for.cond: -// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] // CHECK3-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3-IRBUILDER: omp.inner.for.body: -// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 7 // CHECK3-IRBUILDER-NEXT: [[SUB:%.*]] = sub nsw i32 32000000, [[MUL]] // CHECK3-IRBUILDER-NEXT: store i32 [[SUB]], ptr [[I]], align 4 // CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK3-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2]]) -// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i64 [[IDXPROM]] -// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[IDXPROM3:%.*]] = sext i32 [[TMP9]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[IDXPROM3]] -// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[MUL5:%.*]] = fmul float [[TMP7]], [[TMP10]] -// 
CHECK3-IRBUILDER-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP12]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[IDXPROM6]] -// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[MUL8:%.*]] = fmul float [[MUL5]], [[TMP13]] -// CHECK3-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP15]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM9]] // CHECK3-IRBUILDER-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 4 @@ -2019,7 +2019,7 @@ // CHECK3-IRBUILDER: omp.body.continue: // CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3-IRBUILDER: omp.inner.for.inc: -// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], 1 // CHECK3-IRBUILDER-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 // CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM11:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3]]) @@ -2066,38 +2066,38 @@ // CHECK3-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 // CHECK3-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3-IRBUILDER: omp.dispatch.body: -// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: store i64 [[TMP1]], ptr [[DOTOMP_IV]], align 8 // CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3-IRBUILDER: omp.inner.for.cond: -// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[ADD:%.*]] = add i64 [[TMP3]], 1 // CHECK3-IRBUILDER-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP2]], [[ADD]] // CHECK3-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3-IRBUILDER: omp.inner.for.body: -// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[MUL:%.*]] = mul i64 [[TMP4]], 127 // CHECK3-IRBUILDER-NEXT: [[ADD2:%.*]] = add i64 131071, [[MUL]] // CHECK3-IRBUILDER-NEXT: store i64 
[[ADD2]], ptr [[I]], align 8 // CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK3-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3]]) -// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP6:%.*]] = load i64, ptr [[I]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP6:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i64 [[TMP6]] -// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load i64, ptr [[I]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP8:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[TMP9]] -// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[MUL5:%.*]] = fmul float [[TMP7]], [[TMP10]] -// CHECK3-IRBUILDER-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load i64, ptr [[I]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP11:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[TMP12]] -// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX6]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[MUL7:%.*]] = fmul float [[MUL5]], [[TMP13]] -// CHECK3-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load i64, ptr [[I]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[TMP15]] // CHECK3-IRBUILDER-NEXT: store float [[MUL7]], ptr [[ARRAYIDX8]], align 4 // CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY_ORDERED_AFTER:%.*]] @@ -2107,7 +2107,7 @@ // CHECK3-IRBUILDER: omp.body.continue: // CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3-IRBUILDER: omp.inner.for.inc: -// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[ADD9:%.*]] = add i64 [[TMP16]], 1 // CHECK3-IRBUILDER-NEXT: store i64 [[ADD9]], ptr [[DOTOMP_IV]], align 8 // CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM10:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB6]]) @@ -2151,10 +2151,10 @@ // CHECK3-IRBUILDER-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 
8 // CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[X]], align 4 // CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[Y]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[CONV:%.*]] = trunc i32 [[TMP0]] to i8 // CHECK3-IRBUILDER-NEXT: store i8 [[CONV]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[CONV3:%.*]] = sext i8 [[TMP1]] to i32 // CHECK3-IRBUILDER-NEXT: [[SUB:%.*]] = sub i32 57, [[CONV3]] // CHECK3-IRBUILDER-NEXT: [[ADD:%.*]] = add i32 [[SUB]], 1 @@ -2163,20 +2163,20 @@ // CHECK3-IRBUILDER-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV4]], 11 // CHECK3-IRBUILDER-NEXT: [[SUB5:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK3-IRBUILDER-NEXT: store i64 [[SUB5]], ptr [[DOTCAPTURE_EXPR_2]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: store i8 [[TMP2]], ptr [[I]], align 1 // CHECK3-IRBUILDER-NEXT: store i32 11, ptr [[X6]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[CONV7:%.*]] = sext i8 [[TMP3]] to i32 // CHECK3-IRBUILDER-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV7]], 57 // CHECK3-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK3-IRBUILDER: omp.precond.then: // CHECK3-IRBUILDER-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: store i64 [[TMP4]], ptr [[DOTOMP_UB]], align 8 // CHECK3-IRBUILDER-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_2]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB8:[0-9]+]]) // CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_8(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 70, i64 0, i64 [[TMP5]], i64 1, i64 1) // CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND:%.*]] @@ -2186,25 +2186,25 @@ // CHECK3-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP6]], 0 // CHECK3-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3-IRBUILDER: omp.dispatch.body: -// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: store i64 [[TMP7]], ptr [[DOTOMP_IV]], align 8 // CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3-IRBUILDER: omp.inner.for.cond: -// CHECK3-IRBUILDER-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load 
i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[CMP11:%.*]] = icmp sle i64 [[TMP8]], [[TMP9]] // CHECK3-IRBUILDER-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3-IRBUILDER: omp.inner.for.body: -// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[CONV12:%.*]] = sext i8 [[TMP10]] to i64 -// CHECK3-IRBUILDER-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[DIV13:%.*]] = sdiv i64 [[TMP11]], 11 // CHECK3-IRBUILDER-NEXT: [[MUL14:%.*]] = mul nsw i64 [[DIV13]], 1 // CHECK3-IRBUILDER-NEXT: [[ADD15:%.*]] = add nsw i64 [[CONV12]], [[MUL14]] // CHECK3-IRBUILDER-NEXT: [[CONV16:%.*]] = trunc i64 [[ADD15]] to i8 // CHECK3-IRBUILDER-NEXT: store i8 [[CONV16]], ptr [[I8]], align 1 -// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[DIV17:%.*]] = sdiv i64 [[TMP13]], 11 // CHECK3-IRBUILDER-NEXT: [[MUL18:%.*]] = mul nsw i64 [[DIV17]], 11 // CHECK3-IRBUILDER-NEXT: [[SUB19:%.*]] = sub nsw i64 [[TMP12]], [[MUL18]] @@ -2214,25 +2214,25 @@ // CHECK3-IRBUILDER-NEXT: store i32 [[CONV22]], ptr [[X9]], align 4 // CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM23:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK3-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM23]]) -// CHECK3-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK3-IRBUILDER-NEXT: [[TMP14:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP15]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i64 [[IDXPROM]] -// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP17:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP18:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP17:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP18:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[IDXPROM24:%.*]] = sext i8 [[TMP18]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[IDXPROM24]] -// CHECK3-IRBUILDER-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX25]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX25]], align 4, !noundef [[NOUNDEF3]] // 
CHECK3-IRBUILDER-NEXT: [[MUL26:%.*]] = fmul float [[TMP16]], [[TMP19]] -// CHECK3-IRBUILDER-NEXT: [[TMP20:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP21:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK3-IRBUILDER-NEXT: [[TMP20:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP21:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[IDXPROM27:%.*]] = sext i8 [[TMP21]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i64 [[IDXPROM27]] -// CHECK3-IRBUILDER-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX28]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX28]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[MUL29:%.*]] = fmul float [[MUL26]], [[TMP22]] -// CHECK3-IRBUILDER-NEXT: [[TMP23:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP24:%.*]] = load i8, ptr [[I8]], align 1 +// CHECK3-IRBUILDER-NEXT: [[TMP23:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP24:%.*]] = load i8, ptr [[I8]], align 1, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[IDXPROM30:%.*]] = sext i8 [[TMP24]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds float, ptr [[TMP23]], i64 [[IDXPROM30]] // CHECK3-IRBUILDER-NEXT: store float [[MUL29]], ptr [[ARRAYIDX31]], align 4 @@ -2243,7 +2243,7 @@ // CHECK3-IRBUILDER: omp.body.continue: // CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3-IRBUILDER: omp.inner.for.inc: -// CHECK3-IRBUILDER-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[ADD32:%.*]] = add nsw i64 [[TMP25]], 1 // CHECK3-IRBUILDER-NEXT: store i64 [[ADD32]], ptr [[DOTOMP_IV]], align 8 // CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM33:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB8]]) @@ -2296,23 +2296,23 @@ // CHECK3-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 // CHECK3-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3-IRBUILDER: omp.dispatch.body: -// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 // CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3-IRBUILDER: omp.inner.for.cond: -// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] // CHECK3-IRBUILDER-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3-IRBUILDER: omp.inner.for.body: -// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP4]], 20 // CHECK3-IRBUILDER-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // 
CHECK3-IRBUILDER-NEXT: [[ADD:%.*]] = add nsw i32 48, [[MUL]] // CHECK3-IRBUILDER-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i8 // CHECK3-IRBUILDER-NEXT: store i8 [[CONV]], ptr [[I]], align 1 -// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP6]], 20 // CHECK3-IRBUILDER-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 20 // CHECK3-IRBUILDER-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], [[MUL5]] @@ -2321,24 +2321,24 @@ // CHECK3-IRBUILDER-NEXT: store i32 [[ADD7]], ptr [[X2]], align 4 // CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM8:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK3-IRBUILDER-NEXT: call void @__kmpc_ordered(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM8]]) -// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load ptr, ptr [[B_ADDR]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[TMP8:%.*]] = load i8, ptr [[I]], align 1 // CHECK3-IRBUILDER-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP8]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 [[IDXPROM]] -// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load ptr, ptr [[C_ADDR]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[TMP11:%.*]] = load i8, ptr [[I]], align 1 // CHECK3-IRBUILDER-NEXT: [[IDXPROM9:%.*]] = zext i8 [[TMP11]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[IDXPROM9]] -// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[MUL11:%.*]] = fmul float [[TMP9]], [[TMP12]] -// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load ptr, ptr [[D_ADDR]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[TMP14:%.*]] = load i8, ptr [[I]], align 1 // CHECK3-IRBUILDER-NEXT: [[IDXPROM12:%.*]] = zext i8 [[TMP14]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[IDXPROM12]] -// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX13]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX13]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[MUL14:%.*]] = fmul float [[MUL11]], [[TMP15]] -// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load ptr, ptr [[A_ADDR]], align 8 +// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[TMP17:%.*]] = load i8, ptr [[I]], align 1 // CHECK3-IRBUILDER-NEXT: [[IDXPROM15:%.*]] = zext i8 [[TMP17]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i64 [[IDXPROM15]] @@ -2350,7 +2350,7 @@ // CHECK3-IRBUILDER: omp.body.continue: // CHECK3-IRBUILDER-NEXT: br 
label [[OMP_INNER_FOR_INC:%.*]] // CHECK3-IRBUILDER: omp.inner.for.inc: -// CHECK3-IRBUILDER-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP18]], 1 // CHECK3-IRBUILDER-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4 // CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM18:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB10]]) @@ -2391,58 +2391,58 @@ // CHECK3-IRBUILDER-NEXT: [[I28:%.*]] = alloca i32, align 4 // CHECK3-IRBUILDER-NEXT: store i32 [[LOW]], ptr [[LOW_ADDR]], align 4 // CHECK3-IRBUILDER-NEXT: store i32 [[UP]], ptr [[UP_ADDR]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = load i32, ptr [[LOW_ADDR]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = load i32, ptr [[LOW_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[UP_ADDR]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[UP_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]] // CHECK3-IRBUILDER-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK3-IRBUILDER-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK3-IRBUILDER-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK3-IRBUILDER-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK3-IRBUILDER-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: store i32 [[TMP4]], ptr [[I]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP5]], [[TMP6]] // CHECK3-IRBUILDER-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK3-IRBUILDER: simd.if.then: // CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4 // CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3-IRBUILDER: omp.inner.for.cond: -// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK3-IRBUILDER-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK3-IRBUILDER-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // 
CHECK3-IRBUILDER-NEXT: [[ADD6:%.*]] = add i32 [[TMP8]], 1 // CHECK3-IRBUILDER-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP7]], [[ADD6]] // CHECK3-IRBUILDER-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3-IRBUILDER: omp.inner.for.body: -// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !3 -// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK3-IRBUILDER-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[MUL:%.*]] = mul i32 [[TMP10]], 1 // CHECK3-IRBUILDER-NEXT: [[ADD8:%.*]] = add i32 [[TMP9]], [[MUL]] -// CHECK3-IRBUILDER-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group !3 -// CHECK3-IRBUILDER-NEXT: [[TMP11:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group !3 +// CHECK3-IRBUILDER-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK3-IRBUILDER-NEXT: [[TMP11:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]] -// CHECK3-IRBUILDER-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4, !llvm.access.group !3 -// CHECK3-IRBUILDER-NEXT: call void @__captured_stmt(ptr [[I5]]), !llvm.access.group !3 +// CHECK3-IRBUILDER-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK3-IRBUILDER-NEXT: call void @__captured_stmt(ptr [[I5]]), !llvm.access.group [[ACC_GRP4]] // CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY_ORDERED_AFTER:%.*]] // CHECK3-IRBUILDER: omp.inner.for.body.ordered.after: // CHECK3-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3-IRBUILDER: omp.body.continue: // CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3-IRBUILDER: omp.inner.for.inc: -// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK3-IRBUILDER-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[ADD9:%.*]] = add i32 [[TMP12]], 1 -// CHECK3-IRBUILDER-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK3-IRBUILDER-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK3-IRBUILDER: omp.inner.for.end: -// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // 
CHECK3-IRBUILDER-NEXT: [[SUB10:%.*]] = sub i32 [[TMP14]], [[TMP15]] // CHECK3-IRBUILDER-NEXT: [[SUB11:%.*]] = sub i32 [[SUB10]], 1 // CHECK3-IRBUILDER-NEXT: [[ADD12:%.*]] = add i32 [[SUB11]], 1 @@ -2452,31 +2452,31 @@ // CHECK3-IRBUILDER-NEXT: store i32 [[ADD15]], ptr [[I5]], align 4 // CHECK3-IRBUILDER-NEXT: br label [[SIMD_IF_END]] // CHECK3-IRBUILDER: simd.if.end: -// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load i32, ptr [[LOW_ADDR]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP16:%.*]] = load i32, ptr [[LOW_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: store i32 [[TMP16]], ptr [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP17:%.*]] = load i32, ptr [[UP_ADDR]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP17:%.*]] = load i32, ptr [[UP_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: store i32 [[TMP17]], ptr [[DOTCAPTURE_EXPR_19]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[SUB21:%.*]] = sub i32 [[TMP18]], [[TMP19]] // CHECK3-IRBUILDER-NEXT: [[SUB22:%.*]] = sub i32 [[SUB21]], 1 // CHECK3-IRBUILDER-NEXT: [[ADD23:%.*]] = add i32 [[SUB22]], 1 // CHECK3-IRBUILDER-NEXT: [[DIV24:%.*]] = udiv i32 [[ADD23]], 1 // CHECK3-IRBUILDER-NEXT: [[SUB25:%.*]] = sub i32 [[DIV24]], 1 // CHECK3-IRBUILDER-NEXT: store i32 [[SUB25]], ptr [[DOTCAPTURE_EXPR_20]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: store i32 [[TMP20]], ptr [[I26]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[CMP27:%.*]] = icmp slt i32 [[TMP21]], [[TMP22]] // CHECK3-IRBUILDER-NEXT: br i1 [[CMP27]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK3-IRBUILDER: omp.precond.then: // CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: store i32 [[TMP23]], ptr [[DOTOMP_UB]], align 4 // CHECK3-IRBUILDER-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-IRBUILDER-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB12:[0-9]+]]) // CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_init_4u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 66, i32 0, i32 [[TMP24]], i32 1, i32 1) // 
CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND:%.*]] @@ -2486,50 +2486,50 @@ // CHECK3-IRBUILDER-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK3-IRBUILDER-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3-IRBUILDER: omp.dispatch.body: -// CHECK3-IRBUILDER-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: store i32 [[TMP26]], ptr [[DOTOMP_IV16]], align 4 // CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND30:%.*]] // CHECK3-IRBUILDER: omp.inner.for.cond30: -// CHECK3-IRBUILDER-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 -// CHECK3-IRBUILDER-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 +// CHECK3-IRBUILDER-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[ADD31:%.*]] = add i32 [[TMP28]], 1 // CHECK3-IRBUILDER-NEXT: [[CMP32:%.*]] = icmp ult i32 [[TMP27]], [[ADD31]] // CHECK3-IRBUILDER-NEXT: br i1 [[CMP32]], label [[OMP_INNER_FOR_BODY33:%.*]], label [[OMP_INNER_FOR_END42:%.*]] // CHECK3-IRBUILDER: omp.inner.for.body33: -// CHECK3-IRBUILDER-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !llvm.access.group !7 -// CHECK3-IRBUILDER-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 +// CHECK3-IRBUILDER-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[MUL34:%.*]] = mul i32 [[TMP30]], 1 // CHECK3-IRBUILDER-NEXT: [[ADD35:%.*]] = add i32 [[TMP29]], [[MUL34]] -// CHECK3-IRBUILDER-NEXT: store i32 [[ADD35]], ptr [[I28]], align 4, !llvm.access.group !7 -// CHECK3-IRBUILDER-NEXT: [[TMP31:%.*]] = load i32, ptr [[I28]], align 4, !llvm.access.group !7 +// CHECK3-IRBUILDER-NEXT: store i32 [[ADD35]], ptr [[I28]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK3-IRBUILDER-NEXT: [[TMP31:%.*]] = load i32, ptr [[I28]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[IDXPROM36:%.*]] = sext i32 [[TMP31]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM36]] -// CHECK3-IRBUILDER-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX37]], align 4, !llvm.access.group !7 -// CHECK3-IRBUILDER-NEXT: call void @__captured_stmt.1(ptr [[I28]]), !llvm.access.group !7 +// CHECK3-IRBUILDER-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX37]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK3-IRBUILDER-NEXT: call void @__captured_stmt.1(ptr [[I28]]), !llvm.access.group [[ACC_GRP8]] // CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_BODY33_ORDERED_AFTER:%.*]] // CHECK3-IRBUILDER: omp.inner.for.body33.ordered.after: // CHECK3-IRBUILDER-NEXT: br label [[OMP_BODY_CONTINUE38:%.*]] // CHECK3-IRBUILDER: omp.body.continue38: // CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_INC39:%.*]] // CHECK3-IRBUILDER: omp.inner.for.inc39: -// CHECK3-IRBUILDER-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 
+// CHECK3-IRBUILDER-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[ADD40:%.*]] = add i32 [[TMP32]], 1 -// CHECK3-IRBUILDER-NEXT: store i32 [[ADD40]], ptr [[DOTOMP_IV16]], align 4, !llvm.access.group !7 +// CHECK3-IRBUILDER-NEXT: store i32 [[ADD40]], ptr [[DOTOMP_IV16]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK3-IRBUILDER-NEXT: [[OMP_GLOBAL_THREAD_NUM41:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB12]]) -// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_fini_4u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM41]]), !llvm.access.group !7 -// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND30]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK3-IRBUILDER-NEXT: call void @__kmpc_dispatch_fini_4u(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM41]]), !llvm.access.group [[ACC_GRP8]] +// CHECK3-IRBUILDER-NEXT: br label [[OMP_INNER_FOR_COND30]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK3-IRBUILDER: omp.inner.for.end42: // CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK3-IRBUILDER: omp.dispatch.inc: // CHECK3-IRBUILDER-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK3-IRBUILDER: omp.dispatch.end: -// CHECK3-IRBUILDER-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK3-IRBUILDER-NEXT: br i1 [[TMP34]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3-IRBUILDER: .omp.final.then: -// CHECK3-IRBUILDER-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 -// CHECK3-IRBUILDER-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-IRBUILDER-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_18]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[SUB43:%.*]] = sub i32 [[TMP36]], [[TMP37]] // CHECK3-IRBUILDER-NEXT: [[SUB44:%.*]] = sub i32 [[SUB43]], 1 // CHECK3-IRBUILDER-NEXT: [[ADD45:%.*]] = add i32 [[SUB44]], 1 @@ -2552,7 +2552,7 @@ // CHECK3-IRBUILDER-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8 // CHECK3-IRBUILDER-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8 // CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64 // CHECK3-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]] // CHECK3-IRBUILDER-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4 @@ -2565,7 +2565,7 @@ // CHECK3-IRBUILDER-NEXT: [[I_ADDR:%.*]] = alloca ptr, align 8 // CHECK3-IRBUILDER-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8 // CHECK3-IRBUILDER-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 -// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-IRBUILDER-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK3-IRBUILDER-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64 // 
CHECK3-IRBUILDER-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]] // CHECK3-IRBUILDER-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX]], align 4 @@ -2587,38 +2587,38 @@ // CHECK5-NEXT: store i32 32000000, ptr [[I]], align 4 // CHECK5-NEXT: br label [[FOR_COND:%.*]] // CHECK5: for.cond: -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], 33 // CHECK5-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] // CHECK5: for.body: -// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP2]] to i64 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 [[IDXPROM]] -// CHECK5-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM1:%.*]] = sext i32 [[TMP5]] to i64 // CHECK5-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[IDXPROM1]] -// CHECK5-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], [[TMP6]] -// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM3:%.*]] = sext i32 [[TMP8]] to i64 // CHECK5-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 [[IDXPROM3]] -// CHECK5-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX4]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL5:%.*]] = fmul float [[MUL]], [[TMP9]] -// CHECK5-NEXT: [[TMP10:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP11]] to i64 // CHECK5-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[IDXPROM6]] // CHECK5-NEXT: store float [[MUL5]], ptr [[ARRAYIDX7]], align 4 // CHECK5-NEXT: br label [[FOR_INC:%.*]] // CHECK5: for.inc: -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], -7 // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]] +// CHECK5-NEXT: br 
label [[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] // CHECK5: for.end: // CHECK5-NEXT: ret void // @@ -2638,34 +2638,34 @@ // CHECK5-NEXT: store i64 131071, ptr [[I]], align 8 // CHECK5-NEXT: br label [[FOR_COND:%.*]] // CHECK5: for.cond: -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[I]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp ult i64 [[TMP0]], 2147483647 // CHECK5-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] // CHECK5: for.body: -// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[I]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i64 [[TMP2]] -// CHECK5-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[I]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i64 [[TMP5]] -// CHECK5-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX1]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX1]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], [[TMP6]] -// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[I]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i64 [[TMP8]] -// CHECK5-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL3:%.*]] = fmul float [[MUL]], [[TMP9]] -// CHECK5-NEXT: [[TMP10:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[TMP11:%.*]] = load i64, ptr [[I]], align 8 +// CHECK5-NEXT: [[TMP10:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[TMP11]] // CHECK5-NEXT: store float [[MUL3]], ptr [[ARRAYIDX4]], align 4 // CHECK5-NEXT: br label [[FOR_INC:%.*]] // CHECK5: for.inc: -// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[I]], align 8 +// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[I]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD:%.*]] = add i64 [[TMP12]], 127 // CHECK5-NEXT: store i64 [[ADD]], ptr [[I]], align 8 -// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK5: for.end: // CHECK5-NEXT: ret void // @@ -2686,12 +2686,12 @@ // CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK5-NEXT: store i32 0, ptr [[X]], align 4 // CHECK5-NEXT: store i32 0, ptr [[Y]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[Y]], align 4 +// CHECK5-NEXT: 
[[TMP0:%.*]] = load i32, ptr [[Y]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CONV:%.*]] = trunc i32 [[TMP0]] to i8 // CHECK5-NEXT: store i8 [[CONV]], ptr [[I]], align 1 // CHECK5-NEXT: br label [[FOR_COND:%.*]] // CHECK5: for.cond: -// CHECK5-NEXT: [[TMP1:%.*]] = load i8, ptr [[I]], align 1 +// CHECK5-NEXT: [[TMP1:%.*]] = load i8, ptr [[I]], align 1, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CONV1:%.*]] = sext i8 [[TMP1]] to i32 // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV1]], 57 // CHECK5-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END13:%.*]] @@ -2699,45 +2699,45 @@ // CHECK5-NEXT: store i32 11, ptr [[X]], align 4 // CHECK5-NEXT: br label [[FOR_COND2:%.*]] // CHECK5: for.cond2: -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[X]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[X]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP3:%.*]] = icmp ugt i32 [[TMP2]], 0 // CHECK5-NEXT: br i1 [[CMP3]], label [[FOR_BODY4:%.*]], label [[FOR_END:%.*]] // CHECK5: for.body4: -// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i8, ptr [[I]], align 1 +// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i8, ptr [[I]], align 1, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM:%.*]] = sext i8 [[TMP4]] to i64 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i64 [[IDXPROM]] -// CHECK5-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK5-NEXT: [[TMP7:%.*]] = load i8, ptr [[I]], align 1 +// CHECK5-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP6:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i8, ptr [[I]], align 1, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM5:%.*]] = sext i8 [[TMP7]] to i64 // CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i64 [[IDXPROM5]] -// CHECK5-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX6]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX6]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = fmul float [[TMP5]], [[TMP8]] -// CHECK5-NEXT: [[TMP9:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK5-NEXT: [[TMP10:%.*]] = load i8, ptr [[I]], align 1 +// CHECK5-NEXT: [[TMP9:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i8, ptr [[I]], align 1, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM7:%.*]] = sext i8 [[TMP10]] to i64 // CHECK5-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[IDXPROM7]] -// CHECK5-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX8]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX8]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL9:%.*]] = fmul float [[MUL]], [[TMP11]] -// CHECK5-NEXT: [[TMP12:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[TMP13:%.*]] = load i8, ptr [[I]], align 1 +// CHECK5-NEXT: [[TMP12:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP13:%.*]] = load i8, ptr [[I]], align 1, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM10:%.*]] = sext i8 [[TMP13]] to i64 // CHECK5-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[IDXPROM10]] // CHECK5-NEXT: store float [[MUL9]], ptr [[ARRAYIDX11]], align 4 // CHECK5-NEXT: br label 
[[FOR_INC:%.*]] // CHECK5: for.inc: -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[X]], align 4 +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[X]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[DEC:%.*]] = add i32 [[TMP14]], -1 // CHECK5-NEXT: store i32 [[DEC]], ptr [[X]], align 4 -// CHECK5-NEXT: br label [[FOR_COND2]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK5-NEXT: br label [[FOR_COND2]], !llvm.loop [[LOOP6:![0-9]+]] // CHECK5: for.end: // CHECK5-NEXT: br label [[FOR_INC12:%.*]] // CHECK5: for.inc12: -// CHECK5-NEXT: [[TMP15:%.*]] = load i8, ptr [[I]], align 1 +// CHECK5-NEXT: [[TMP15:%.*]] = load i8, ptr [[I]], align 1, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[INC:%.*]] = add i8 [[TMP15]], 1 // CHECK5-NEXT: store i8 [[INC]], ptr [[I]], align 1 -// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK5: for.end13: // CHECK5-NEXT: ret void // @@ -2767,45 +2767,45 @@ // CHECK5-NEXT: store i32 -10, ptr [[X]], align 4 // CHECK5-NEXT: br label [[FOR_COND1:%.*]] // CHECK5: for.cond1: -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[X]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[X]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP1]], 10 // CHECK5-NEXT: br i1 [[CMP2]], label [[FOR_BODY3:%.*]], label [[FOR_END:%.*]] // CHECK5: for.body3: -// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 8 +// CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[B_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[TMP3:%.*]] = load i8, ptr [[I]], align 1 // CHECK5-NEXT: [[IDXPROM:%.*]] = zext i8 [[TMP3]] to i64 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 [[IDXPROM]] -// CHECK5-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -// CHECK5-NEXT: [[TMP5:%.*]] = load ptr, ptr [[C_ADDR]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load ptr, ptr [[C_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[TMP6:%.*]] = load i8, ptr [[I]], align 1 // CHECK5-NEXT: [[IDXPROM4:%.*]] = zext i8 [[TMP6]] to i64 // CHECK5-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i64 [[IDXPROM4]] -// CHECK5-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX5]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX5]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = fmul float [[TMP4]], [[TMP7]] -// CHECK5-NEXT: [[TMP8:%.*]] = load ptr, ptr [[D_ADDR]], align 8 +// CHECK5-NEXT: [[TMP8:%.*]] = load ptr, ptr [[D_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[TMP9:%.*]] = load i8, ptr [[I]], align 1 // CHECK5-NEXT: [[IDXPROM6:%.*]] = zext i8 [[TMP9]] to i64 // CHECK5-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i64 [[IDXPROM6]] -// CHECK5-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL8:%.*]] = fmul float [[MUL]], [[TMP10]] -// CHECK5-NEXT: [[TMP11:%.*]] = load ptr, ptr [[A_ADDR]], align 8 +// CHECK5-NEXT: [[TMP11:%.*]] = load ptr, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[TMP12:%.*]] = load i8, ptr [[I]], align 1 // CHECK5-NEXT: [[IDXPROM9:%.*]] = zext i8 [[TMP12]] to i64 // CHECK5-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i64 [[IDXPROM9]] // CHECK5-NEXT: store float [[MUL8]], ptr [[ARRAYIDX10]], align 
4 // CHECK5-NEXT: br label [[FOR_INC:%.*]] // CHECK5: for.inc: -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[X]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[X]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1 // CHECK5-NEXT: store i32 [[INC]], ptr [[X]], align 4 -// CHECK5-NEXT: br label [[FOR_COND1]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK5-NEXT: br label [[FOR_COND1]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK5: for.end: // CHECK5-NEXT: br label [[FOR_INC11:%.*]] // CHECK5: for.inc11: // CHECK5-NEXT: [[TMP14:%.*]] = load i8, ptr [[I]], align 1 // CHECK5-NEXT: [[INC12:%.*]] = add i8 [[TMP14]], 1 // CHECK5-NEXT: store i8 [[INC12]], ptr [[I]], align 1 -// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK5-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK5: for.end13: // CHECK5-NEXT: ret void // @@ -2833,59 +2833,59 @@ // CHECK5-NEXT: [[I31:%.*]] = alloca i32, align 4 // CHECK5-NEXT: store i32 [[LOW]], ptr [[LOW_ADDR]], align 4 // CHECK5-NEXT: store i32 [[UP]], ptr [[UP_ADDR]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[LOW_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[LOW_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[UP_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[UP_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]] // CHECK5-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK5-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK5-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK5-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP4]], ptr [[I]], align 4 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP5]], [[TMP6]] // CHECK5-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK5: simd.if.then: // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !9 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group 
[[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD6:%.*]] = add i32 [[TMP8]], 1 // CHECK5-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP7]], [[ADD6]] // CHECK5-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group !9 -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = mul i32 [[TMP10]], 1 // CHECK5-NEXT: [[ADD8:%.*]] = add i32 [[TMP9]], [[MUL]] -// CHECK5-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group !9 -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group !9 +// CHECK5-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM]] -// CHECK5-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4, !llvm.access.group !9 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group !9 +// CHECK5-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP12]] to i64 // CHECK5-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM9]] -// CHECK5-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX10]], align 4, !llvm.access.group !9 +// CHECK5-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD11:%.*]] = add i32 [[TMP13]], 1 -// CHECK5-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK5: omp.inner.for.end: -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB12:%.*]] = sub i32 [[TMP15]], [[TMP16]] 
// CHECK5-NEXT: [[SUB13:%.*]] = sub i32 [[SUB12]], 1 // CHECK5-NEXT: [[ADD14:%.*]] = add i32 [[SUB13]], 1 @@ -2895,12 +2895,12 @@ // CHECK5-NEXT: store i32 [[ADD17]], ptr [[I5]], align 4 // CHECK5-NEXT: br label [[SIMD_IF_END]] // CHECK5: simd.if.end: -// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[LOW_ADDR]], align 4 +// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[LOW_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP17]], ptr [[DOTCAPTURE_EXPR_19]], align 4 -// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[UP_ADDR]], align 4 +// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[UP_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP18]], ptr [[DOTCAPTURE_EXPR_20]], align 4 -// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4 -// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 +// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB22:%.*]] = sub i32 [[TMP19]], [[TMP20]] // CHECK5-NEXT: [[SUB23:%.*]] = sub i32 [[SUB22]], 1 // CHECK5-NEXT: [[ADD24:%.*]] = add i32 [[SUB23]], 1 @@ -2908,50 +2908,50 @@ // CHECK5-NEXT: [[SUB26:%.*]] = sub i32 [[DIV25]], 1 // CHECK5-NEXT: store i32 [[SUB26]], ptr [[DOTCAPTURE_EXPR_21]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_21]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP21]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 +// CHECK5-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP22]], ptr [[I27]], align 4 -// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 -// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4 +// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP28:%.*]] = icmp slt i32 [[TMP23]], [[TMP24]] // CHECK5-NEXT: br i1 [[CMP28]], label [[SIMD_IF_THEN29:%.*]], label [[SIMD_IF_END52:%.*]] // CHECK5: simd.if.then29: -// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP25]], ptr [[DOTOMP_IV30]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND32:%.*]] // CHECK5: omp.inner.for.cond32: -// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IV30]], align 4, !llvm.access.group !13 -// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !13 +// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IV30]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD33:%.*]] = add i32 [[TMP27]], 1 // CHECK5-NEXT: [[CMP34:%.*]] = icmp ult i32 [[TMP26]], [[ADD33]] // CHECK5-NEXT: br i1 [[CMP34]], label [[OMP_INNER_FOR_BODY35:%.*]], label [[OMP_INNER_FOR_END45:%.*]] // CHECK5: omp.inner.for.body35: -// CHECK5-NEXT: [[TMP28:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_19]], align 4, !llvm.access.group !13 -// CHECK5-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV30]], align 4, !llvm.access.group !13 +// CHECK5-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV30]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL36:%.*]] = mul i32 [[TMP29]], 1 // CHECK5-NEXT: [[ADD37:%.*]] = add i32 [[TMP28]], [[MUL36]] -// CHECK5-NEXT: store i32 [[ADD37]], ptr [[I31]], align 4, !llvm.access.group !13 -// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[I31]], align 4, !llvm.access.group !13 +// CHECK5-NEXT: store i32 [[ADD37]], ptr [[I31]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[I31]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM38:%.*]] = sext i32 [[TMP30]] to i64 // CHECK5-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM38]] -// CHECK5-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX39]], align 4, !llvm.access.group !13 -// CHECK5-NEXT: [[TMP31:%.*]] = load i32, ptr [[I31]], align 4, !llvm.access.group !13 +// CHECK5-NEXT: store float 0.000000e+00, ptr [[ARRAYIDX39]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK5-NEXT: [[TMP31:%.*]] = load i32, ptr [[I31]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM40:%.*]] = sext i32 [[TMP31]] to i64 // CHECK5-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds [10 x float], ptr @f, i64 0, i64 [[IDXPROM40]] -// CHECK5-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX41]], align 4, !llvm.access.group !13 +// CHECK5-NEXT: store float 1.000000e+00, ptr [[ARRAYIDX41]], align 4, !llvm.access.group [[ACC_GRP14]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE42:%.*]] // CHECK5: omp.body.continue42: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC43:%.*]] // CHECK5: omp.inner.for.inc43: -// CHECK5-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV30]], align 4, !llvm.access.group !13 +// CHECK5-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV30]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD44:%.*]] = add i32 [[TMP32]], 1 -// CHECK5-NEXT: store i32 [[ADD44]], ptr [[DOTOMP_IV30]], align 4, !llvm.access.group !13 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND32]], !llvm.loop [[LOOP14:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD44]], ptr [[DOTOMP_IV30]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND32]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK5: omp.inner.for.end45: -// CHECK5-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 -// CHECK5-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4 -// CHECK5-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4 +// CHECK5-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_20]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_19]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB46:%.*]] = sub i32 [[TMP34]], [[TMP35]] // CHECK5-NEXT: [[SUB47:%.*]] = sub i32 [[SUB46]], 1 // CHECK5-NEXT: [[ADD48:%.*]] = add i32 [[SUB47]], 1 diff --git a/clang/test/OpenMP/parallel_for_reduction_task_codegen.cpp b/clang/test/OpenMP/parallel_for_reduction_task_codegen.cpp --- 
a/clang/test/OpenMP/parallel_for_reduction_task_codegen.cpp +++ b/clang/test/OpenMP/parallel_for_reduction_task_codegen.cpp @@ -42,7 +42,7 @@ // CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1:[0-9]+]], i32 2, ptr @.omp_outlined., ptr [[ARGC_ADDR]], ptr [[TMP0]]) // CHECK1-NEXT: ret i32 0 // @@ -82,14 +82,14 @@ // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store i32 0, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP1]], i64 0 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 0 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64 // CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP4]] -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds ptr, ptr [[TMP5]], i64 9 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[ARRAYIDX3]], align 8 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[LB_ADD_LEN]] @@ -125,198 +125,198 @@ // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 0 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 -// CHECK1-NEXT: store i64 4, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 -// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP30]], i8 0, i64 4, i1 false) +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 
0, i32 1 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 +// CHECK1-NEXT: store i64 4, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 +// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 4, i1 false) // CHECK1-NEXT: [[DOTRD_INPUT_GEP_7:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds ptr, ptr [[TMP33]], i64 0 -// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARRAYIDX8]], align 8 -// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, ptr [[TMP34]], i64 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP36]] -// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds ptr, ptr [[TMP37]], i64 9 -// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[ARRAYIDX11]], align 8 -// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, ptr [[TMP38]], i64 [[LB_ADD_LEN10]] -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARRAYIDX9]], ptr [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[ARRAYIDX12]] to i64 -// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[ARRAYIDX9]] to i64 -// CHECK1-NEXT: [[TMP42:%.*]] = sub i64 [[TMP40]], [[TMP41]] -// CHECK1-NEXT: [[TMP43:%.*]] = sdiv exact i64 [[TMP42]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP44:%.*]] = add nuw i64 [[TMP43]], 1 -// CHECK1-NEXT: [[TMP45:%.*]] = mul nuw i64 [[TMP44]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 2 -// CHECK1-NEXT: store i64 [[TMP45]], ptr [[TMP46]], align 8 -// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP47]], align 8 -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP48]], align 8 -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, 
i32 5 -// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP49]], align 8 -// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 6 -// CHECK1-NEXT: store i32 1, ptr [[TMP50]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds ptr, ptr [[TMP30]], i64 0 +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[ARRAYIDX8]], align 8 +// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, ptr [[TMP31]], i64 0 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP33:%.*]] = sext i32 [[TMP32]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP33]] +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds ptr, ptr [[TMP34]], i64 9 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[ARRAYIDX11]], align 8 +// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, ptr [[TMP35]], i64 [[LB_ADD_LEN10]] +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARRAYIDX9]], ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP37:%.*]] = ptrtoint ptr [[ARRAYIDX12]] to i64 +// CHECK1-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[ARRAYIDX9]] to i64 +// CHECK1-NEXT: [[TMP39:%.*]] = sub i64 [[TMP37]], [[TMP38]] +// CHECK1-NEXT: [[TMP40:%.*]] = sdiv exact i64 [[TMP39]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP41:%.*]] = add nuw i64 [[TMP40]], 1 +// CHECK1-NEXT: [[TMP42:%.*]] = mul nuw i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 2 +// CHECK1-NEXT: store i64 [[TMP42]], ptr [[TMP43]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP44]], align 8 +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 6 +// CHECK1-NEXT: store i32 1, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP48]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP50:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP49]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) +// CHECK1-NEXT: store ptr [[TMP50]], ptr [[DOTTASK_RED_]], align 8 // CHECK1-NEXT: [[TMP51:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[TMP51]], align 4 -// CHECK1-NEXT: [[TMP54:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr 
@[[GLOB1]], i32 [[TMP52]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) -// CHECK1-NEXT: store ptr [[TMP54]], ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: [[TMP55:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[TMP55]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB2:[0-9]+]], i32 [[TMP56]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP57]], 9 +// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[TMP51]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB2:[0-9]+]], i32 [[TMP52]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) +// CHECK1-NEXT: [[TMP53:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP53]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP58]], [[COND_FALSE]] ] +// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP54]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 -// CHECK1-NEXT: store i64 [[TMP59]], ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i64 [[TMP55]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP61:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP60]], [[TMP61]] +// CHECK1-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP56]], [[TMP57]] // CHECK1-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK1: omp.inner.for.cond.cleanup: // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP62:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP62]], 1 +// CHECK1-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP58]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL]] // CHECK1-NEXT: store i64 [[ADD]], ptr [[I]], align 8 -// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP63]], align 8 -// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP64]], align 8 -// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr 
[[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP66:%.*]] = load ptr, ptr [[_TMP5]], align 8 -// CHECK1-NEXT: store ptr [[TMP66]], ptr [[TMP65]], align 8 -// CHECK1-NEXT: [[TMP67:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP68:%.*]] = load i32, ptr [[TMP67]], align 4 -// CHECK1-NEXT: [[TMP69:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP68]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) -// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP69]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP71]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP73:%.*]] = load ptr, ptr [[TMP72]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP73]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) -// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP69]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP75]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP77:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: store ptr [[TMP77]], ptr [[TMP76]], align 8 -// CHECK1-NEXT: [[TMP78:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP79:%.*]] = load i32, ptr [[TMP78]], align 4 -// CHECK1-NEXT: [[TMP80:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP79]], ptr [[TMP69]]) +// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP59]], align 8 +// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP60]], align 8 +// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP62:%.*]] = load ptr, ptr [[_TMP5]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP62]], ptr [[TMP61]], align 8 +// CHECK1-NEXT: [[TMP63:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP64:%.*]] = load i32, ptr [[TMP63]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP65:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP64]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
+// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP65]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP66]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP68:%.*]] = load ptr, ptr [[TMP67]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP68]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) +// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP65]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP69]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP71:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP71]], ptr [[TMP70]], align 8 +// CHECK1-NEXT: [[TMP72:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP73:%.*]] = load i32, ptr [[TMP72]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP74:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP73]], ptr [[TMP65]]) // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP81:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP81]], 1 +// CHECK1-NEXT: [[TMP75:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP75]], 1 // CHECK1-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: -// CHECK1-NEXT: [[TMP82:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP83:%.*]] = load i32, ptr [[TMP82]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP83]]) +// CHECK1-NEXT: [[TMP76:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP77:%.*]] = load i32, ptr [[TMP76]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP77]]) +// CHECK1-NEXT: [[TMP78:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP79:%.*]] = load i32, ptr [[TMP78]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP79]], i32 1) +// CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP80]], align 8 +// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP81]], align 8 +// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 +// CHECK1-NEXT: [[TMP83:%.*]] = inttoptr i64 [[TMP11]] to ptr +// CHECK1-NEXT: store ptr [[TMP83]], ptr [[TMP82]], align 8 // CHECK1-NEXT: [[TMP84:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[TMP84]], align 4 -// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP85]], i32 1) -// CHECK1-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP86]], align 8 -// CHECK1-NEXT: 
[[TMP88:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP88]], align 8 -// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP90:%.*]] = inttoptr i64 [[TMP11]] to ptr -// CHECK1-NEXT: store ptr [[TMP90]], ptr [[TMP89]], align 8 -// CHECK1-NEXT: [[TMP91:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP92:%.*]] = load i32, ptr [[TMP91]], align 4 -// CHECK1-NEXT: [[TMP94:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP92]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP94]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[TMP84]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP86:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP85]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP86]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP95:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP96:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP95]], [[TMP96]] +// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP88:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP87]], [[TMP88]] // CHECK1-NEXT: store i32 [[ADD15]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP97]] +// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP89]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE22:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST16:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT20:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP98:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP98]] to i32 -// CHECK1-NEXT: [[TMP99:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP99]] to i32 +// CHECK1-NEXT: [[TMP90:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP90]] to i32 +// CHECK1-NEXT: [[TMP91:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP91]] to i32 // CHECK1-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV]], [[CONV17]] // CHECK1-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8 // CHECK1-NEXT: store i8 [[CONV19]], ptr 
[[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT20]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP97]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP89]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_DONE22]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done22: -// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP92]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP85]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP100:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP101:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP100]] monotonic, align 4 -// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP102]] +// CHECK1-NEXT: [[TMP92:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP93:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP92]] monotonic, align 4 +// CHECK1-NEXT: [[TMP94:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP94]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY23]], label [[OMP_ARRAYCPY_DONE36:%.*]], label [[OMP_ARRAYCPY_BODY24:%.*]] // CHECK1: omp.arraycpy.body24: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST25:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT34:%.*]], [[ATOMIC_EXIT:%.*]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST26:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT33:%.*]], [[ATOMIC_EXIT]] ] -// CHECK1-NEXT: [[TMP103:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1 -// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP103]] to i32 +// CHECK1-NEXT: [[TMP95:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP95]] to i32 // CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]] monotonic, align 1 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] // CHECK1: atomic_cont: -// CHECK1-NEXT: [[TMP104:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP109:%.*]], [[ATOMIC_CONT]] ] -// CHECK1-NEXT: store i8 [[TMP104]], ptr [[_TMP28]], align 1 -// CHECK1-NEXT: [[TMP105:%.*]] = load i8, ptr [[_TMP28]], align 1 -// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP105]] to i32 -// CHECK1-NEXT: [[TMP106:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1 -// CHECK1-NEXT: [[CONV30:%.*]] = sext i8 [[TMP106]] to i32 +// CHECK1-NEXT: [[TMP96:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP101:%.*]], [[ATOMIC_CONT]] ] +// CHECK1-NEXT: store i8 [[TMP96]], ptr [[_TMP28]], align 1 +// CHECK1-NEXT: [[TMP97:%.*]] = load i8, ptr [[_TMP28]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP97]] to i32 +// CHECK1-NEXT: [[TMP98:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: 
[[CONV30:%.*]] = sext i8 [[TMP98]] to i32 // CHECK1-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV29]], [[CONV30]] // CHECK1-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8 // CHECK1-NEXT: store i8 [[CONV32]], ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP107:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP108:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP104]], i8 [[TMP107]] monotonic monotonic, align 1 -// CHECK1-NEXT: [[TMP109]] = extractvalue { i8, i1 } [[TMP108]], 0 -// CHECK1-NEXT: [[TMP110:%.*]] = extractvalue { i8, i1 } [[TMP108]], 1 -// CHECK1-NEXT: br i1 [[TMP110]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] +// CHECK1-NEXT: [[TMP99:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 +// CHECK1-NEXT: [[TMP100:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP96]], i8 [[TMP99]] monotonic monotonic, align 1 +// CHECK1-NEXT: [[TMP101]] = extractvalue { i8, i1 } [[TMP100]], 0 +// CHECK1-NEXT: [[TMP102:%.*]] = extractvalue { i8, i1 } [[TMP100]], 1 +// CHECK1-NEXT: br i1 [[TMP102]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] // CHECK1: atomic_exit: // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT33]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT34]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP102]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP94]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_DONE36]], label [[OMP_ARRAYCPY_BODY24]] // CHECK1: omp.arraycpy.done36: // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: -// CHECK1-NEXT: [[TMP111:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP111]]) +// CHECK1-NEXT: [[TMP103:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP103]]) // CHECK1-NEXT: ret void // // @@ -327,8 +327,8 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -339,12 +339,12 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP5]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], 
align 4 // CHECK1-NEXT: ret void // // @@ -357,7 +357,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP4]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP2]], [[TMP5]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -379,7 +379,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]] @@ -388,9 +388,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP8]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8 @@ -433,65 +433,65 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void 
@llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: call void [[TMP13]](ptr [[TMP14]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]] -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: [[TMP22:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP19]], ptr [[TMP18]]) -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]] -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP31]], i64 9 -// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 -// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i64 [[LB_ADD_LEN_I]] -// CHECK1-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 -// CHECK1-NEXT: [[TMP34:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP35:%.*]] = sub i64 [[TMP33]], [[TMP34]] -// CHECK1-NEXT: [[TMP36:%.*]] = sdiv exact i64 [[TMP35]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP37:%.*]] = add nuw i64 [[TMP36]], 1 -// CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: store i64 [[TMP37]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !12 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP39]], ptr [[TMP25]]) -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 
8 -// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[TMP43]] to i64 -// CHECK1-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP46:%.*]] = sub i64 [[TMP44]], [[TMP45]] -// CHECK1-NEXT: [[TMP47:%.*]] = sdiv exact i64 [[TMP46]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr i8, ptr [[TMP40]], i64 [[TMP47]] -// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP48]], ptr [[TMP4_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP15]], ptr [[TMP14]]) +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP24:%.*]] = sext i32 [[TMP23]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP24]] +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP26]], i64 9 +// 
CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 +// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP27]], i64 [[LB_ADD_LEN_I]] +// CHECK1-NEXT: [[TMP28:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 +// CHECK1-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP30:%.*]] = sub i64 [[TMP28]], [[TMP29]] +// CHECK1-NEXT: [[TMP31:%.*]] = sdiv exact i64 [[TMP30]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP32:%.*]] = add nuw i64 [[TMP31]], 1 +// CHECK1-NEXT: [[TMP33:%.*]] = mul nuw i64 [[TMP32]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: store i64 [[TMP32]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !13 +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP34]], ptr [[TMP20]]) +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP37]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[TMP38]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP41:%.*]] = sub i64 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: [[TMP42:%.*]] = sdiv exact i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP35]], i64 [[TMP42]] +// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP43]], ptr [[TMP4_I]], align 8, !noalias !13 // CHECK1-NEXT: ret i32 0 // // @@ -503,38 +503,38 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 // CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], 
align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP15]], i64 [[TMP18]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP15]], [[TMP21]] +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[TMP14]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP11]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE5:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: -// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP15]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP23]] to i32 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP18]] to i32 +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP19]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i8 // CHECK1-NEXT: store i8 [[CONV4]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done5: // CHECK1-NEXT: ret void diff --git a/clang/test/OpenMP/parallel_for_simd_aligned_codegen.cpp b/clang/test/OpenMP/parallel_for_simd_aligned_codegen.cpp --- a/clang/test/OpenMP/parallel_for_simd_aligned_codegen.cpp +++ b/clang/test/OpenMP/parallel_for_simd_aligned_codegen.cpp @@ -47,52 +47,52 @@ // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr 
[[C_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TMP0]], align 8, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP1]], i64 16) ] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !3 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !3 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] 
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: diff --git a/clang/test/OpenMP/parallel_for_simd_codegen.cpp b/clang/test/OpenMP/parallel_for_simd_codegen.cpp --- a/clang/test/OpenMP/parallel_for_simd_codegen.cpp +++ b/clang/test/OpenMP/parallel_for_simd_codegen.cpp @@ -192,10 +192,10 @@ // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* {{.+}}, i32 %{{.+}}) // // Linear start and step are used to calculate final value of the linear variables. -// CHECK: [[LIN:%.+]] = load i32, i32* [[LIN_PRIV]] -// CHECK: store i32 [[LIN]], i32* [[LIN_VAR]], -// CHECK: [[GLIN:%.+]] = load double*, double** [[G_PTR_CUR]] -// CHECK: store double* [[GLIN]], double** [[GLIN_VAR]], +// CHECK: [[LIN:%.+]] = load i32, i32* [[LIN_PRIV]], {{.*}} +// CHECK: store i32 [[LIN]], i32* [[LIN_VAR]], {{.*}} +// CHECK: [[GLIN:%.+]] = load double*, double** [[G_PTR_CUR]], {{.*}} +// CHECK: store double* [[GLIN]], double** [[GLIN_VAR]] #pragma omp parallel for simd // CHECK: call void @__kmpc_for_static_init_4(%struct.ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1) @@ -796,7 +796,7 @@ // OMP50-LABEL: define internal void @.omp_outlined..14 // OMP50: call void @__kmpc_for_static_init_4 // OMP50: omp.inner.for.body: -// OMP50: %{{[0-9]+}} = atomicrmw add i32* %{{[0-9]+}}, i32 1 monotonic, align 4, !llvm.access.group !47 +// OMP50: %{{[0-9]+}} = atomicrmw add i32* %{{[0-9]+}}, i32 1 monotonic, align 4, !llvm.access.group !48 // OMP45-NOT: !{!"llvm.loop.vectorize.enable", i1 false} // OMP45-DAG: ![[VECT]] = distinct !{![[VECT]], ![[PA:.+]], ![[VM:.+]]} diff --git a/clang/test/OpenMP/parallel_master_reduction_task_codegen.cpp b/clang/test/OpenMP/parallel_master_reduction_task_codegen.cpp --- a/clang/test/OpenMP/parallel_master_reduction_task_codegen.cpp +++ b/clang/test/OpenMP/parallel_master_reduction_task_codegen.cpp @@ -42,7 +42,7 @@ // CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1:[0-9]+]], i32 2, ptr @.omp_outlined., ptr [[ARGC_ADDR]], ptr [[TMP0]]) // CHECK1-NEXT: ret i32 0 // @@ -71,14 +71,14 @@ // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP1]], i64 0 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 0 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64 // CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP4]] -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds ptr, ptr [[TMP5]], i64 9 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[ARRAYIDX3]], align 8 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[LB_ADD_LEN]] @@ -114,164 +114,164 @@ // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 0 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 -// CHECK1-NEXT: store i64 4, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 -// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP30]], i8 0, i64 4, i1 false) +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 +// CHECK1-NEXT: store i64 4, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr 
[[DOTRD_INPUT_GEP_]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 +// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 4, i1 false) // CHECK1-NEXT: [[DOTRD_INPUT_GEP_6:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds ptr, ptr [[TMP33]], i64 0 -// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARRAYIDX7]], align 8 -// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP34]], i64 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP36]] -// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr [[TMP37]], i64 9 -// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[ARRAYIDX10]], align 8 -// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[TMP38]], i64 [[LB_ADD_LEN9]] -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[ARRAYIDX11]] to i64 -// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[ARRAYIDX8]] to i64 -// CHECK1-NEXT: [[TMP42:%.*]] = sub i64 [[TMP40]], [[TMP41]] -// CHECK1-NEXT: [[TMP43:%.*]] = sdiv exact i64 [[TMP42]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP44:%.*]] = add nuw i64 [[TMP43]], 1 -// CHECK1-NEXT: [[TMP45:%.*]] = mul nuw i64 [[TMP44]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 2 -// CHECK1-NEXT: store i64 [[TMP45]], ptr [[TMP46]], align 8 -// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP47]], align 8 -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP48]], align 8 -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP49]], align 8 -// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 6 -// CHECK1-NEXT: store i32 1, ptr [[TMP50]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds 
ptr, ptr [[TMP30]], i64 0 +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[ARRAYIDX7]], align 8 +// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP31]], i64 0 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP33:%.*]] = sext i32 [[TMP32]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP33]] +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr [[TMP34]], i64 9 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[ARRAYIDX10]], align 8 +// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[TMP35]], i64 [[LB_ADD_LEN9]] +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP37:%.*]] = ptrtoint ptr [[ARRAYIDX11]] to i64 +// CHECK1-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[ARRAYIDX8]] to i64 +// CHECK1-NEXT: [[TMP39:%.*]] = sub i64 [[TMP37]], [[TMP38]] +// CHECK1-NEXT: [[TMP40:%.*]] = sdiv exact i64 [[TMP39]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP41:%.*]] = add nuw i64 [[TMP40]], 1 +// CHECK1-NEXT: [[TMP42:%.*]] = mul nuw i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 2 +// CHECK1-NEXT: store i64 [[TMP42]], ptr [[TMP43]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP44]], align 8 +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 6 +// CHECK1-NEXT: store i32 1, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP48]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP50:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP49]], i32 0, i32 2, ptr [[DOTRD_INPUT_]]) +// CHECK1-NEXT: store ptr [[TMP50]], ptr [[DOTTASK_RED_]], align 8 // CHECK1-NEXT: [[TMP51:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[TMP51]], align 4 -// CHECK1-NEXT: [[TMP54:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP52]], i32 0, i32 2, ptr [[DOTRD_INPUT_]]) -// CHECK1-NEXT: store ptr [[TMP54]], ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: [[TMP55:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[TMP55]], align 4 -// CHECK1-NEXT: [[TMP57:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP56]]) -// CHECK1-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 -// CHECK1-NEXT: br i1 [[TMP58]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] +// CHECK1-NEXT: 
[[TMP52:%.*]] = load i32, ptr [[TMP51]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP53:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP52]]) +// CHECK1-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK1-NEXT: br i1 [[TMP54]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] // CHECK1: omp_if.then: -// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP59]], align 8 -// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP60]], align 8 -// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP62:%.*]] = load ptr, ptr [[TMP]], align 8 -// CHECK1-NEXT: store ptr [[TMP62]], ptr [[TMP61]], align 8 -// CHECK1-NEXT: [[TMP63:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP64:%.*]] = load i32, ptr [[TMP63]], align 4 -// CHECK1-NEXT: [[TMP65:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP64]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) -// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP65]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP67]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP69:%.*]] = load ptr, ptr [[TMP68]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP69]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) -// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP65]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP71]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP73:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: store ptr [[TMP73]], ptr [[TMP72]], align 8 -// CHECK1-NEXT: [[TMP74:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP75:%.*]] = load i32, ptr [[TMP74]], align 4 -// CHECK1-NEXT: [[TMP76:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP75]], ptr [[TMP65]]) -// CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP56]]) +// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP55]], align 8 +// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP56]], align 8 +// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP58:%.*]] = load ptr, ptr [[TMP]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP58]], ptr [[TMP57]], align 8 +// CHECK1-NEXT: [[TMP59:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP60:%.*]] = load i32, ptr [[TMP59]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP61:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP60]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
+// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP61]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP62]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP64:%.*]] = load ptr, ptr [[TMP63]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP64]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) +// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP61]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP65]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP67:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP67]], ptr [[TMP66]], align 8 +// CHECK1-NEXT: [[TMP68:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP69:%.*]] = load i32, ptr [[TMP68]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP70:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP69]], ptr [[TMP61]]) +// CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP52]]) // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: +// CHECK1-NEXT: [[TMP71:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP72:%.*]] = load i32, ptr [[TMP71]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP72]], i32 0) +// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP73]], align 8 +// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP74]], align 8 +// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 +// CHECK1-NEXT: [[TMP76:%.*]] = inttoptr i64 [[TMP11]] to ptr +// CHECK1-NEXT: store ptr [[TMP76]], ptr [[TMP75]], align 8 // CHECK1-NEXT: [[TMP77:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP78:%.*]] = load i32, ptr [[TMP77]], align 4 -// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP78]], i32 0) -// CHECK1-NEXT: [[TMP79:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP79]], align 8 -// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP81]], align 8 -// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP83:%.*]] = inttoptr i64 [[TMP11]] to ptr -// CHECK1-NEXT: store ptr [[TMP83]], ptr [[TMP82]], align 8 -// CHECK1-NEXT: [[TMP84:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[TMP84]], align 4 -// CHECK1-NEXT: [[TMP87:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB2:[0-9]+]], i32 [[TMP85]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP87]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP78:%.*]] = load i32, ptr [[TMP77]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP79:%.*]] = call i32 
@__kmpc_reduce_nowait(ptr @[[GLOB2:[0-9]+]], i32 [[TMP78]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP79]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP88:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP89:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP88]], [[TMP89]] +// CHECK1-NEXT: [[TMP80:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP80]], [[TMP81]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP90]] +// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP82]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE18:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST12:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT16:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP91:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP91]] to i32 -// CHECK1-NEXT: [[TMP92:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP92]] to i32 +// CHECK1-NEXT: [[TMP83:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP83]] to i32 +// CHECK1-NEXT: [[TMP84:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP84]] to i32 // CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV]], [[CONV13]] // CHECK1-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 // CHECK1-NEXT: store i8 [[CONV15]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT16]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE17:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT16]], [[TMP90]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE17:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT16]], [[TMP82]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_DONE18]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done18: -// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB2]], i32 [[TMP85]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB2]], i32 [[TMP78]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP93:%.*]] = load i32, ptr [[ARGC1]], 
align 4 -// CHECK1-NEXT: [[TMP94:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP93]] monotonic, align 4 -// CHECK1-NEXT: [[TMP95:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY19:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP95]] +// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP86:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP85]] monotonic, align 4 +// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY19:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP87]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY19]], label [[OMP_ARRAYCPY_DONE32:%.*]], label [[OMP_ARRAYCPY_BODY20:%.*]] // CHECK1: omp.arraycpy.body20: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST21:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT30:%.*]], [[ATOMIC_EXIT:%.*]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST22:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT29:%.*]], [[ATOMIC_EXIT]] ] -// CHECK1-NEXT: [[TMP96:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1 -// CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP96]] to i32 +// CHECK1-NEXT: [[TMP88:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP88]] to i32 // CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]] monotonic, align 1 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] // CHECK1: atomic_cont: -// CHECK1-NEXT: [[TMP97:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY20]] ], [ [[TMP102:%.*]], [[ATOMIC_CONT]] ] -// CHECK1-NEXT: store i8 [[TMP97]], ptr [[_TMP24]], align 1 -// CHECK1-NEXT: [[TMP98:%.*]] = load i8, ptr [[_TMP24]], align 1 -// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP98]] to i32 -// CHECK1-NEXT: [[TMP99:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1 -// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP99]] to i32 +// CHECK1-NEXT: [[TMP89:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY20]] ], [ [[TMP94:%.*]], [[ATOMIC_CONT]] ] +// CHECK1-NEXT: store i8 [[TMP89]], ptr [[_TMP24]], align 1 +// CHECK1-NEXT: [[TMP90:%.*]] = load i8, ptr [[_TMP24]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP90]] to i32 +// CHECK1-NEXT: [[TMP91:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP91]] to i32 // CHECK1-NEXT: [[ADD27:%.*]] = add nsw i32 [[CONV25]], [[CONV26]] // CHECK1-NEXT: [[CONV28:%.*]] = trunc i32 [[ADD27]] to i8 // CHECK1-NEXT: store i8 [[CONV28]], ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP100:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP101:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i8 [[TMP97]], i8 [[TMP100]] monotonic monotonic, align 1 -// CHECK1-NEXT: [[TMP102]] = extractvalue { i8, i1 } [[TMP101]], 0 -// CHECK1-NEXT: [[TMP103:%.*]] = extractvalue { i8, i1 } [[TMP101]], 1 -// CHECK1-NEXT: br i1 [[TMP103]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] +// CHECK1-NEXT: [[TMP92:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 +// CHECK1-NEXT: [[TMP93:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i8 [[TMP89]], i8 [[TMP92]] monotonic monotonic, align 1 +// CHECK1-NEXT: [[TMP94]] = extractvalue { i8, i1 } [[TMP93]], 0 +// CHECK1-NEXT: [[TMP95:%.*]] = extractvalue { i8, i1 } [[TMP93]], 1 +// CHECK1-NEXT: 
br i1 [[TMP95]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] // CHECK1: atomic_exit: // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT29]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT30]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE31:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT29]], [[TMP95]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE31:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT29]], [[TMP87]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE31]], label [[OMP_ARRAYCPY_DONE32]], label [[OMP_ARRAYCPY_BODY20]] // CHECK1: omp.arraycpy.done32: // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: -// CHECK1-NEXT: [[TMP104:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP104]]) +// CHECK1-NEXT: [[TMP96:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP96]]) // CHECK1-NEXT: ret void // // @@ -282,8 +282,8 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -294,12 +294,12 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP5]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -312,7 +312,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP4]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP2]], [[TMP5]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -334,7 +334,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8 +// CHECK1-NEXT: 
[[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]] @@ -343,9 +343,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP8]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8 @@ -388,65 +388,65 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: call void [[TMP13]](ptr [[TMP14]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]] -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr 
[[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: [[TMP22:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP19]], ptr [[TMP18]]) -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]] -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP31]], i64 9 -// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 -// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i64 [[LB_ADD_LEN_I]] -// CHECK1-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 -// CHECK1-NEXT: [[TMP34:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP35:%.*]] = sub i64 [[TMP33]], [[TMP34]] -// CHECK1-NEXT: [[TMP36:%.*]] = sdiv exact i64 [[TMP35]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP37:%.*]] = add nuw i64 [[TMP36]], 1 -// CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: store i64 [[TMP37]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !12 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP39]], ptr [[TMP25]]) -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 8 -// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[TMP43]] to i64 -// CHECK1-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP46:%.*]] = sub i64 [[TMP44]], [[TMP45]] -// CHECK1-NEXT: [[TMP47:%.*]] = sdiv exact i64 [[TMP46]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr i8, ptr [[TMP40]], i64 [[TMP47]] -// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP48]], ptr [[TMP4_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// 
CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP15]], ptr [[TMP14]]) +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP24:%.*]] = sext i32 [[TMP23]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP24]] +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP26]], i64 9 +// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 +// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP27]], i64 [[LB_ADD_LEN_I]] +// CHECK1-NEXT: [[TMP28:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 +// CHECK1-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP30:%.*]] = sub i64 [[TMP28]], [[TMP29]] +// CHECK1-NEXT: [[TMP31:%.*]] = sdiv exact i64 [[TMP30]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP32:%.*]] = add nuw i64 [[TMP31]], 1 +// CHECK1-NEXT: [[TMP33:%.*]] = mul nuw i64 [[TMP32]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: store i64 [[TMP32]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !13 +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP34]], ptr [[TMP20]]) +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], 
ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP37]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[TMP38]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP41:%.*]] = sub i64 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: [[TMP42:%.*]] = sdiv exact i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP35]], i64 [[TMP42]] +// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP43]], ptr [[TMP4_I]], align 8, !noalias !13 // CHECK1-NEXT: ret i32 0 // // @@ -458,38 +458,38 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 // CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP15]], i64 [[TMP18]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP15]], [[TMP21]] +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[TMP14]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP11]], [[TMP17]] // CHECK1-NEXT: br i1 
[[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE5:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: -// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP15]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP23]] to i32 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP18]] to i32 +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP19]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i8 // CHECK1-NEXT: store i8 [[CONV4]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done5: // CHECK1-NEXT: ret void diff --git a/clang/test/OpenMP/parallel_master_taskloop_codegen.cpp b/clang/test/OpenMP/parallel_master_taskloop_codegen.cpp --- a/clang/test/OpenMP/parallel_master_taskloop_codegen.cpp +++ b/clang/test/OpenMP/parallel_master_taskloop_codegen.cpp @@ -71,33 +71,33 @@ // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK1-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined., i64 [[TMP3]]) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR__CASTED2]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED2]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..2, i64 [[TMP6]]) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_3]], align 1 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK1-NEXT: [[FROMBOOL7:%.*]] = zext i1 [[TOBOOL5]] to i8 // CHECK1-NEXT: store i8 [[FROMBOOL7]], ptr [[DOTCAPTURE_EXPR__CASTED6]], align 1 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED6]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR__CASTED8]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED8]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED8]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL9:%.*]] = trunc i8 [[TMP13]] to i1 // CHECK1-NEXT: br i1 [[TOBOOL9]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -128,13 +128,13 @@ // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK1-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK1-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] // CHECK1: omp_if.then: // CHECK1-NEXT: call void @__kmpc_taskgroup(ptr 
@[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 33, i64 80, i64 1, ptr @.omp_task_entry.) // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP5]], i32 0, i32 0 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP6]], i32 0, i32 4 @@ -147,7 +147,7 @@ // CHECK1-NEXT: store i64 1, ptr [[TMP10]], align 8 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP6]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP11]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP5]], i32 1, ptr [[TMP8]], ptr [[TMP9]], i64 [[TMP12]], i32 1, i32 0, i64 0, ptr null) // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP1]]) @@ -176,55 +176,55 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call 
void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP19]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, 
!noalias !14 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV1_I:%.*]] = sext i32 [[TMP20]] to i64 -// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV1_I]], [[TMP21]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 -// CHECK1-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !14 -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !15 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD2_I:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK1-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]] // CHECK1: .omp_outlined..1.exit: // CHECK1-NEXT: ret i32 0 @@ -242,12 +242,12 @@ // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK1-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK1-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] // CHECK1: omp_if.then: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i64 80, i64 1, ptr @.omp_task_entry..4) // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP5]], i32 0, i32 0 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP6]], i32 0, i32 5 @@ -258,7 +258,7 @@ // CHECK1-NEXT: store i64 1, ptr [[TMP9]], align 8 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP6]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP4]] to i64 // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP5]], i32 1, ptr [[TMP7]], ptr [[TMP8]], i64 [[TMP11]], i32 1, i32 1, i64 [[TMP12]], ptr null) // CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP1]]) @@ -287,55 +287,55 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // 
CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !28 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !28 -// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, 
!noalias !28 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META27:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !29 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !29 +// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !29, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP19]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !28 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !29 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !28 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !29, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV1_I:%.*]] = sext i32 [[TMP20]] to i64 -// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !28 +// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !29, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV1_I]], [[TMP21]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !28 -// CHECK1-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !28 -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !28 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !29, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !29 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !29, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD2_I:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK1-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !28 +// CHECK1-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !29 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]] // CHECK1: .omp_outlined..3.exit: // CHECK1-NEXT: ret i32 0 
@@ -366,7 +366,7 @@ // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP3]]) // CHECK1-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0 // CHECK1-NEXT: br i1 [[TMP5]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -375,29 +375,29 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP6]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[AGG_CAPTURED]], i32 0, i32 1 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP9]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP1]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP11]], i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP14]] to i64 // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP13]], i64 [[IDXPROM7]] -// CHECK1-NEXT: [[TMP15:%.*]] = load i8, ptr [[ARRAYIDX8]], align 1 +// CHECK1-NEXT: [[TMP15:%.*]] = load i8, ptr [[ARRAYIDX8]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP15]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTCAPTURE_EXPR_6]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP16]], 0 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK1-NEXT: [[CONV10:%.*]] = sext i32 [[DIV]] to i64 -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB11:%.*]] = sub i32 [[TMP17]], [[TMP18]] // 
CHECK1-NEXT: [[SUB12:%.*]] = sub i32 [[SUB11]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB12]], 1 @@ -409,21 +409,21 @@ // CHECK1-NEXT: [[TMP19:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i64 80, i64 16, ptr @.omp_task_entry..7) // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP19]], i32 0, i32 0 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP20]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP21]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP22]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP23]] to i1 // CHECK1-NEXT: [[TMP24:%.*]] = sext i1 [[TOBOOL]] to i32 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP20]], i32 0, i32 5 // CHECK1-NEXT: store i64 0, ptr [[TMP25]], align 8 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP20]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_9]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_9]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i64 [[TMP27]], ptr [[TMP26]], align 8 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP20]], i32 0, i32 7 // CHECK1-NEXT: store i64 1, ptr [[TMP28]], align 8 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP20]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP29]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[TMP28]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP31:%.*]] = zext i32 [[TMP8]] to i64 // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP3]], ptr [[TMP19]], i32 [[TMP24]], ptr [[TMP25]], ptr [[TMP26]], i64 [[TMP30]], i32 1, i32 2, i64 [[TMP31]], ptr null) // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP3]]) @@ -460,115 +460,115 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], 
align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META29:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META32:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META34:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META36:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META38:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !40 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !40 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !40 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !40 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !40 -// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !40 -// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !40 -// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !40 -// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !40 -// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !40 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !40 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !40 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META33:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META35:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META37:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META39:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !41 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !41 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !41 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !41 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !41 +// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !41 
+// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !41 +// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !41 +// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !41 +// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !41 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !41 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !41 // CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4 -// CHECK1-NEXT: store i32 [[TMP20]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !40 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP20]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !41 // CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 -// CHECK1-NEXT: store i32 [[TMP22]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !40 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP22]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !41 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON_2:%.*]], ptr [[TMP18]], i32 0, i32 1 // CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM_I:%.*]] = sext i32 [[TMP27]] to i64 // CHECK1-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP25]], i64 [[IDXPROM_I]] -// CHECK1-NEXT: [[TMP28:%.*]] = load ptr, ptr [[ARRAYIDX_I]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = load ptr, ptr [[ARRAYIDX_I]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP29:%.*]] = load ptr, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM4_I:%.*]] = sext i32 [[TMP30]] to i64 // CHECK1-NEXT: [[ARRAYIDX5_I:%.*]] = getelementptr inbounds i8, ptr [[TMP28]], i64 [[IDXPROM4_I]] -// CHECK1-NEXT: [[TMP31:%.*]] = load i8, ptr [[ARRAYIDX5_I]], align 1 +// CHECK1-NEXT: [[TMP31:%.*]] = load i8, ptr [[ARRAYIDX5_I]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = sext i8 [[TMP31]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !40 -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !40 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !41 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !41, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV7_I:%.*]] = sext i32 [[TMP32]] to i64 -// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !40 -// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !40 +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !41, !noundef [[NOUNDEF3]] +// 
CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !41, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB8_I:%.*]] = sub i32 [[TMP33]], [[TMP34]] // CHECK1-NEXT: [[SUB9_I:%.*]] = sub i32 [[SUB8_I]], 1 // CHECK1-NEXT: [[CONV11_I:%.*]] = zext i32 [[SUB8_I]] to i64 // CHECK1-NEXT: [[MUL_I:%.*]] = mul nsw i64 [[CONV7_I]], [[CONV11_I]] // CHECK1-NEXT: [[SUB12_I:%.*]] = sub nsw i64 [[MUL_I]], 1 -// CHECK1-NEXT: store i64 [[SUB12_I]], ptr [[DOTCAPTURE_EXPR_6_I]], align 8, !noalias !40 -// CHECK1-NEXT: store i32 0, ptr [[I_I]], align 4, !noalias !40 -// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !40 -// CHECK1-NEXT: store i32 [[TMP35]], ptr [[J_I]], align 4, !noalias !40 -// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !40 +// CHECK1-NEXT: store i64 [[SUB12_I]], ptr [[DOTCAPTURE_EXPR_6_I]], align 8, !noalias !41 +// CHECK1-NEXT: store i32 0, ptr [[I_I]], align 4, !noalias !41 +// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !41, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP35]], ptr [[J_I]], align 4, !noalias !41 +// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !41, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp slt i32 0, [[TMP36]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[LAND_LHS_TRUE_I:%.*]], label [[DOTOMP_OUTLINED__6_EXIT:%.*]] // CHECK1: land.lhs.true.i: -// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !40 -// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !40 +// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !41, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !41, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP13_I:%.*]] = icmp slt i32 [[TMP37]], [[TMP38]] // CHECK1-NEXT: br i1 [[CMP13_I]], label [[TASKLOOP_IF_THEN_I:%.*]], label [[DOTOMP_OUTLINED__6_EXIT]] // CHECK1: taskloop.if.then.i: -// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !40 -// CHECK1-NEXT: store i64 [[TMP39]], ptr [[DOTOMP_IV_I]], align 8, !noalias !40 +// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !41, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i64 [[TMP39]], ptr [[DOTOMP_IV_I]], align 8, !noalias !41 // CHECK1-NEXT: [[TMP40:%.*]] = load ptr, ptr [[TMP18]], align 8 // CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP18]], i32 0, i32 1 // CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP41]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP43:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !40 -// CHECK1-NEXT: [[TMP44:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !40 +// CHECK1-NEXT: [[TMP43:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !41, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP44:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !41, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP16_I:%.*]] = icmp ule i64 [[TMP43]], [[TMP44]] // CHECK1-NEXT: br i1 [[CMP16_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP45:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !40 -// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 
4, !noalias !40 -// CHECK1-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !40 +// CHECK1-NEXT: [[TMP45:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !41, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !41, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !41, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB17_I:%.*]] = sub i32 [[TMP46]], [[TMP47]] // CHECK1-NEXT: [[SUB18_I:%.*]] = sub i32 [[SUB17_I]], 1 // CHECK1-NEXT: [[CONV22_I:%.*]] = zext i32 [[SUB17_I]] to i64 // CHECK1-NEXT: [[DIV23_I:%.*]] = sdiv i64 [[TMP45]], [[CONV22_I]] // CHECK1-NEXT: [[CONV26_I:%.*]] = trunc i64 [[DIV23_I]] to i32 -// CHECK1-NEXT: store i32 [[CONV26_I]], ptr [[I14_I]], align 4, !noalias !40 -// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !40 +// CHECK1-NEXT: store i32 [[CONV26_I]], ptr [[I14_I]], align 4, !noalias !41 +// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !41, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV27_I:%.*]] = sext i32 [[TMP48]] to i64 -// CHECK1-NEXT: [[TMP49:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !40 -// CHECK1-NEXT: [[TMP50:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !40 -// CHECK1-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !40 -// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !40 +// CHECK1-NEXT: [[TMP49:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !41, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP50:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !41, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP51:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !41, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !41, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB28_I:%.*]] = sub i32 [[TMP51]], [[TMP52]] // CHECK1-NEXT: [[SUB29_I:%.*]] = sub i32 [[SUB28_I]], 1 // CHECK1-NEXT: [[CONV33_I:%.*]] = zext i32 [[SUB28_I]] to i64 // CHECK1-NEXT: [[DIV34_I:%.*]] = sdiv i64 [[TMP50]], [[CONV33_I]] -// CHECK1-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !40 -// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !40 +// CHECK1-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !41, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !41, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB35_I:%.*]] = sub i32 [[TMP53]], [[TMP54]] // CHECK1-NEXT: [[SUB36_I:%.*]] = sub i32 [[SUB35_I]], 1 // CHECK1-NEXT: [[CONV40_I:%.*]] = zext i32 [[SUB35_I]] to i64 @@ -576,10 +576,10 @@ // CHECK1-NEXT: [[SUB42_I:%.*]] = sub nsw i64 [[TMP49]], [[MUL41_I]] // CHECK1-NEXT: [[ADD44_I:%.*]] = add nsw i64 [[CONV27_I]], [[SUB42_I]] // CHECK1-NEXT: [[CONV45_I:%.*]] = trunc i64 [[ADD44_I]] to i32 -// CHECK1-NEXT: store i32 [[CONV45_I]], ptr [[J15_I]], align 4, !noalias !40 -// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !40 +// CHECK1-NEXT: store i32 [[CONV45_I]], ptr [[J15_I]], align 4, !noalias !41 +// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !41, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD46_I:%.*]] = add nsw i64 [[TMP55]], 1 -// CHECK1-NEXT: store i64 [[ADD46_I]], ptr 
[[DOTOMP_IV_I]], align 8, !noalias !40 +// CHECK1-NEXT: store i64 [[ADD46_I]], ptr [[DOTOMP_IV_I]], align 8, !noalias !41 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]] // CHECK1: omp.inner.for.end.i: // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__6_EXIT]] @@ -597,7 +597,7 @@ // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK1-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK1-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -613,7 +613,7 @@ // CHECK1-NEXT: store i64 1, ptr [[TMP8]], align 8 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP5]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP9]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP4]], i32 1, ptr [[TMP6]], ptr [[TMP7]], i64 [[TMP10]], i32 1, i32 0, i64 0, ptr null) // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP1]]) @@ -643,77 +643,77 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 
8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META41:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META44:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META46:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META48:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META50:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !52 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !52 -// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !52 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META42:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META45:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META47:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META49:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META51:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !53 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !53 +// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !53, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP19]] to i32 -// 
CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !52 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !53 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !52 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !53, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV1_I:%.*]] = sext i32 [[TMP20]] to i64 -// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !52 +// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !53, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV1_I]], [[TMP21]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !52 -// CHECK1-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !52 -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !52 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !53, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !53 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !53, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP24:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP23]], i32 4) // CHECK1-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 // CHECK1-NEXT: br i1 [[TMP25]], label [[DOTCANCEL_EXIT_I:%.*]], label [[DOTCANCEL_CONTINUE_I:%.*]] // CHECK1: .cancel.exit.i: -// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__9_EXIT:%.*]] // CHECK1: .cancel.continue.i: -// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !52 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !53, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_cancellationpoint(ptr @[[GLOB1]], i32 [[TMP26]], i32 4) // CHECK1-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK1-NEXT: br i1 [[TMP28]], label [[DOTCANCEL_EXIT2_I:%.*]], label [[DOTCANCEL_CONTINUE3_I:%.*]] // CHECK1: .cancel.exit2.i: -// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__9_EXIT]] // CHECK1: .cancel.continue3.i: -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !52 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !53, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD4_I:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK1-NEXT: store i32 [[ADD4_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !52 +// CHECK1-NEXT: store i32 [[ADD4_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !53 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]] // CHECK1: omp.inner.for.end.i: -// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__9_EXIT]] // CHECK1: .omp_outlined..9.exit: -// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr 
[[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK1-NEXT: ret i32 0 // // @@ -732,7 +732,7 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @_ZN1SC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK1-NEXT: ret void // @@ -747,15 +747,15 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 // CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP1]] to i1 // CHECK1-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[TOBOOL2]] to i8 // CHECK1-NEXT: store i8 [[FROMBOOL3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 3, ptr @.omp_outlined..11, ptr [[THIS1]], ptr [[C_ADDR]], i64 [[TMP2]]) // CHECK1-NEXT: ret void // @@ -778,10 +778,10 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP3]]) // CHECK1-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0 // CHECK1-NEXT: br i1 [[TMP5]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -791,12 +791,12 @@ // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_6]], ptr [[AGG_CAPTURED]], i32 0, i32 1 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8 // CHECK1-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1 // CHECK1-NEXT: store ptr [[TMP]], ptr [[_TMP1]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP9]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP10]], 0 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK1-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1 @@ -806,19 +806,19 @@ // CHECK1-NEXT: [[TMP13:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP3]], i32 [[TMP12]], i64 80, i64 16, ptr @.omp_task_entry..13) // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_7:%.*]], ptr [[TMP13]], i32 0, i32 0 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP14]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 5 // CHECK1-NEXT: store i64 0, ptr [[TMP17]], align 8 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[TMP19]] to i64 // CHECK1-NEXT: store i64 [[CONV]], ptr [[TMP18]], align 8 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 7 // CHECK1-NEXT: store i64 1, ptr 
[[TMP20]], align 8 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP21]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP20]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP3]], ptr [[TMP13]], i32 1, ptr [[TMP17]], ptr [[TMP18]], i64 [[TMP22]], i32 1, i32 2, i64 4, ptr null) // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP3]]) // CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP3]]) @@ -854,75 +854,75 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_7:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META53:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META56:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META58:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META60:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META62:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !64 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !64 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !64 -// 
CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !64 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !64 -// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !64 -// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !64 -// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !64 -// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !64 -// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !64 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !64 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !64 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8 -// CHECK1-NEXT: store ptr [[TMP_I]], ptr [[TMP1_I]], align 8, !noalias !64 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META54:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META57:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META59:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META61:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META63:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !65 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !65 +// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP_I]], ptr [[TMP1_I]], align 8, !noalias !65 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_ANON_6:%.*]], ptr [[TMP18]], i32 0, i32 1 // CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 -// CHECK1-NEXT: store i32 [[TMP22]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !64 -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !64 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP22]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !65 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !65, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB3_I:%.*]] = sub nsw i32 [[TMP23]], 1 -// CHECK1-NEXT: store i32 [[SUB3_I]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !64 -// CHECK1-NEXT: store ptr 
[[A_I]], ptr [[TMP4_I]], align 8, !noalias !64 -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP4_I]], align 8, !noalias !64 +// CHECK1-NEXT: store i32 [[SUB3_I]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !65 +// CHECK1-NEXT: store ptr [[A_I]], ptr [[TMP4_I]], align 8, !noalias !65 +// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP4_I]], align 8, !noalias !65 // CHECK1-NEXT: store i32 0, ptr [[TMP24]], align 4 -// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !64 +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !65, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp slt i32 0, [[TMP25]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[TASKLOOP_IF_THEN_I:%.*]], label [[DOTOMP_OUTLINED__12_EXIT:%.*]] // CHECK1: taskloop.if.then.i: -// CHECK1-NEXT: store ptr [[A5_I]], ptr [[TMP6_I]], align 8, !noalias !64 -// CHECK1-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !64 +// CHECK1-NEXT: store ptr [[A5_I]], ptr [[TMP6_I]], align 8, !noalias !65 +// CHECK1-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !65, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP26]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !64 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !65 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_ANON_6]], ptr [[TMP18]], i32 0, i32 1 // CHECK1-NEXT: [[TMP28:%.*]] = load ptr, ptr [[TMP27]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !64 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !65, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV7_I:%.*]] = sext i32 [[TMP29]] to i64 -// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !64 +// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !65, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP8_I:%.*]] = icmp ule i64 [[CONV7_I]], [[TMP30]] // CHECK1-NEXT: br i1 [[CMP8_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !64 -// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP6_I]], align 8, !noalias !64 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !65, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP6_I]], align 8, !noalias !65 // CHECK1-NEXT: store i32 [[TMP31]], ptr [[TMP32]], align 4 -// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !64 +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !65, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD9_I:%.*]] = add nsw i32 [[TMP33]], 1 -// CHECK1-NEXT: store i32 [[ADD9_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !64 +// CHECK1-NEXT: store i32 [[ADD9_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !65 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]] // CHECK1: omp.inner.for.end.i: // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__12_EXIT]] diff --git a/clang/test/OpenMP/parallel_master_taskloop_lastprivate_codegen.cpp b/clang/test/OpenMP/parallel_master_taskloop_lastprivate_codegen.cpp --- a/clang/test/OpenMP/parallel_master_taskloop_lastprivate_codegen.cpp +++ 
b/clang/test/OpenMP/parallel_master_taskloop_lastprivate_codegen.cpp @@ -255,7 +255,7 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store double [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: call void @_ZN1SIdEC2Ed(ptr noundef nonnull align 8 dereferenceable(8) [[THIS1]], double noundef [[TMP0]]) // CHECK1-NEXT: ret void // @@ -282,7 +282,7 @@ // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[S_ARR_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[VAR_ADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP5]]) // CHECK1-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP6]], 0 // CHECK1-NEXT: br i1 [[TMP7]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -301,7 +301,7 @@ // CHECK1-NEXT: [[TMP13:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP5]], i32 9, i64 120, i64 40, ptr @.omp_task_entry.) // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP13]], i32 0, i32 0 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP14]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[AGG_CAPTURED]], i64 40, i1 false) // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP13]], i32 0, i32 1 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP17]], i32 0, i32 0 @@ -327,7 +327,7 @@ // CHECK1-NEXT: store i64 1, ptr [[TMP23]], align 8 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP24]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[TMP23]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP5]], ptr [[TMP13]], i32 1, ptr [[TMP21]], ptr [[TMP22]], i64 [[TMP25]], i32 1, i32 0, i64 0, ptr @.omp_task_dup.) 
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP5]]) // CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP5]]) @@ -395,42 +395,42 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias 
!14 -// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 // CHECK1-NEXT: call void [[TMP20]](ptr [[TMP21]], ptr [[DOTLASTPRIV_PTR_ADDR_I]], ptr [[DOTLASTPRIV_PTR_ADDR1_I]], ptr [[DOTLASTPRIV_PTR_ADDR2_I]], ptr [[DOTLASTPRIV_PTR_ADDR3_I]], ptr [[DOTLASTPRIV_PTR_ADDR4_I]]) #[[ATTR4]] // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP19]], i32 0, i32 3 // CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 8 @@ -445,39 +445,39 @@ // CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP31]], align 8 // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP19]], i32 0, i32 4 // CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR2_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR3_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR4_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP40:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, 
!noalias !14 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR2_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR3_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR4_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP40:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP40]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV5_I:%.*]] = sext i32 [[TMP41]] to i64 -// CHECK1-NEXT: [[TMP42:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP42:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV5_I]], [[TMP42]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 -// CHECK1-NEXT: store i32 [[TMP43]], ptr [[I_I]], align 4, !noalias !14 -// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[TMP36]], align 4 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP43]], ptr [[I_I]], align 4, !noalias !15 +// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[TMP36]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP44]], ptr [[TMP38]], align 4 // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP37]], ptr align 8 [[TMP35]], i64 8, i1 false) // CHECK1-NEXT: store i32 33, ptr [[TMP39]], align 4 -// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD7_I:%.*]] = add nsw i32 [[TMP45]], 1 -// CHECK1-NEXT: store i32 [[ADD7_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 [[ADD7_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]] // CHECK1: omp.inner.for.end.i: -// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP47:%.*]] = icmp ne i32 [[TMP46]], 0 // CHECK1-NEXT: br i1 [[TMP47]], label [[DOTOMP_LASTPRIVATE_THEN_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK1: .omp.lastprivate.then.i: // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP23]], ptr align 8 [[TMP35]], i64 8, i1 false) -// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[TMP36]], align 4 +// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[TMP36]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP48]], ptr [[TMP25]], align 4 // 
CHECK1-NEXT: [[TMP49:%.*]] = getelementptr [[STRUCT_S:%.*]], ptr [[TMP27]], i64 2 // CHECK1-NEXT: br label [[OMP_ARRAYCPY_BODY_I:%.*]] @@ -491,7 +491,7 @@ // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE_I]], label [[OMP_ARRAYCPY_DONE8_I:%.*]], label [[OMP_ARRAYCPY_BODY_I]] // CHECK1: omp.arraycpy.done8.i: // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP28]], ptr align 4 [[TMP38]], i64 8, i1 false) -// CHECK1-NEXT: [[TMP50:%.*]] = load i32, ptr [[TMP39]], align 4 +// CHECK1-NEXT: [[TMP50:%.*]] = load i32, ptr [[TMP39]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP50]], ptr [[TMP34]], align 4 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK1: .omp_outlined..1.exit: @@ -510,7 +510,7 @@ // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP7]], i32 0, i32 0 @@ -624,7 +624,7 @@ // CHECK1-NEXT: store double [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store double [[TMP0]], ptr [[F]], align 8 // CHECK1-NEXT: ret void // @@ -656,7 +656,7 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @_ZN1SIiEC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK1-NEXT: ret void // @@ -683,7 +683,7 @@ // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[S_ARR_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[VAR_ADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP5]]) // CHECK1-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP6]], 0 // CHECK1-NEXT: br i1 [[TMP7]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -700,7 +700,7 @@ // CHECK1-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP5]], i32 9, i64 256, i64 32, ptr @.omp_task_entry..5) // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], ptr [[TMP12]], i32 0, i32 0 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP13]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], 
align 128 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 128, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[AGG_CAPTURED]], i64 32, i1 false) // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], ptr [[TMP12]], i32 0, i32 2 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], ptr [[TMP16]], i32 0, i32 2 @@ -726,7 +726,7 @@ // CHECK1-NEXT: store i64 1, ptr [[TMP22]], align 8 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP13]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP23]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP24:%.*]] = load i64, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = load i64, ptr [[TMP22]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP5]], ptr [[TMP12]], i32 1, ptr [[TMP20]], ptr [[TMP21]], i64 [[TMP24]], i32 1, i32 0, i64 0, ptr @.omp_task_dup..6) // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP5]]) // CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP5]]) @@ -788,42 +788,42 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], ptr [[TMP3]], i32 0, i32 2 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 16 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 16, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 64 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 64, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata 
[[META17:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !28 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store ptr @.omp_task_privates_map..4, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !28 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !28 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META27:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !29 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store ptr @.omp_task_privates_map..4, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !29 +// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !29 // CHECK1-NEXT: call void [[TMP20]](ptr [[TMP21]], ptr [[DOTLASTPRIV_PTR_ADDR_I]], ptr 
[[DOTLASTPRIV_PTR_ADDR1_I]], ptr [[DOTLASTPRIV_PTR_ADDR2_I]], ptr [[DOTLASTPRIV_PTR_ADDR3_I]]) #[[ATTR4]] // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], ptr [[TMP19]], i32 0, i32 1 // CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 8 @@ -836,36 +836,36 @@ // CHECK1-NEXT: [[TMP30:%.*]] = load ptr, ptr [[TMP29]], align 8 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], ptr [[TMP19]], i32 0, i32 3 // CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !28 -// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !28 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR2_I]], align 8, !noalias !28 -// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR3_I]], align 8, !noalias !28 -// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !28 +// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !29 +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !29 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR2_I]], align 8, !noalias !29 +// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR3_I]], align 8, !noalias !29 +// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !29, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP37]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !28 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !29 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !28 +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !29, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV4_I:%.*]] = sext i32 [[TMP38]] to i64 -// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !28 +// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !29, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV4_I]], [[TMP39]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !28 -// CHECK1-NEXT: store i32 [[TMP40]], ptr [[I_I]], align 4, !noalias !28 -// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP33]], align 128 +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !29, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP40]], ptr [[I_I]], align 4, !noalias !29 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP33]], align 128, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP41]], ptr [[TMP34]], align 4 // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP35]], ptr align 4 [[TMP36]], i64 4, i1 false) -// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !28 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !29, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD6_I:%.*]] = add nsw i32 [[TMP42]], 1 -// CHECK1-NEXT: store i32 [[ADD6_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !28 +// CHECK1-NEXT: store i32 [[ADD6_I]], ptr [[DOTOMP_IV_I]], align 
4, !noalias !29 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]] // CHECK1: omp.inner.for.end.i: -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !28 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !29, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 // CHECK1-NEXT: br i1 [[TMP44]], label [[DOTOMP_LASTPRIVATE_THEN_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK1: .omp.lastprivate.then.i: -// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[TMP33]], align 128 +// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[TMP33]], align 128, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP45]], ptr [[TMP23]], align 128 // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP24]], ptr align 4 [[TMP34]], i64 8, i1 false) // CHECK1-NEXT: [[TMP46:%.*]] = getelementptr [[STRUCT_S_0:%.*]], ptr [[TMP26]], i64 2 @@ -897,7 +897,7 @@ // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 64 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], ptr [[TMP3]], i32 0, i32 2 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], ptr [[TMP7]], i32 0, i32 2 @@ -973,7 +973,7 @@ // CHECK1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[F]], align 4 // CHECK1-NEXT: ret void // @@ -1007,7 +1007,7 @@ // CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]]) // CHECK3-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK3-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -1020,7 +1020,7 @@ // CHECK3-NEXT: [[TMP6:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i64 96, i64 16, ptr @.omp_task_entry.) 
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP6]], i32 0, i32 0 // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP9]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP6]], i32 0, i32 1 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP7]], i32 0, i32 5 @@ -1031,7 +1031,7 @@ // CHECK3-NEXT: store i64 1, ptr [[TMP13]], align 8 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP7]], i32 0, i32 9 // CHECK3-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP14]], i8 0, i64 8, i1 false) -// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP6]], i32 1, ptr [[TMP11]], ptr [[TMP12]], i64 [[TMP15]], i32 1, i32 0, i64 0, ptr @.omp_task_dup.) // CHECK3-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK3-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP1]]) @@ -1082,79 +1082,79 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // 
CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 +// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: [[TMP20:%.*]] = load ptr, ptr 
[[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 // CHECK3-NEXT: call void [[TMP20]](ptr [[TMP21]], ptr [[DOTLASTPRIV_PTR_ADDR_I]], ptr [[DOTLASTPRIV_PTR_ADDR1_I]]) #[[ATTR3:[0-9]+]] // CHECK3-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP19]], align 8 // CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP19]], i32 0, i32 1 // CHECK3-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK3-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: [[TMP26:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !14 -// CHECK3-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK3-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: [[TMP26:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !15 +// CHECK3-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP27]] to i32 -// CHECK3-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK3-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK3: omp.inner.for.cond.i: -// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV2_I:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK3-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 +// CHECK3-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP29]] // CHECK3-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK3: omp.inner.for.body.i: -// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 -// CHECK3-NEXT: store i32 [[TMP30]], ptr [[I_I]], align 4, !noalias !14 +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: store i32 [[TMP30]], ptr [[I_I]], align 4, !noalias !15 // CHECK3-NEXT: store double 1.000000e+00, ptr [[TMP25]], align 8 // CHECK3-NEXT: store i32 11, ptr [[TMP26]], align 4 -// CHECK3-NEXT: store ptr [[TMP25]], ptr [[REF_TMP_I]], align 8, !noalias !14 +// CHECK3-NEXT: store ptr [[TMP25]], ptr [[REF_TMP_I]], align 8, !noalias !15 // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP_I]], i32 0, i32 1 -// CHECK3-NEXT: store ptr [[TMP26]], ptr [[TMP31]], align 8, !noalias !14 +// CHECK3-NEXT: store ptr [[TMP26]], ptr [[TMP31]], align 8, !noalias !15 // CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(16) [[REF_TMP_I]]) -// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD3_I:%.*]] = add nsw i32 [[TMP32]], 1 -// CHECK3-NEXT: store i32 [[ADD3_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK3-NEXT: store i32 [[ADD3_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I]] // CHECK3: 
omp.inner.for.end.i: -// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 +// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK3-NEXT: br i1 [[TMP34]], label [[DOTOMP_LASTPRIVATE_THEN_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK3: .omp.lastprivate.then.i: -// CHECK3-NEXT: [[TMP35:%.*]] = load double, ptr [[TMP25]], align 8 +// CHECK3-NEXT: [[TMP35:%.*]] = load double, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store volatile double [[TMP35]], ptr [[TMP22]], align 8 -// CHECK3-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP26]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP36]], ptr [[TMP24]], align 4 // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK3: .omp_outlined..1.exit: @@ -1173,7 +1173,7 @@ // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 8 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK3-NEXT: ret void @@ -1210,7 +1210,7 @@ // CHECK4-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK4-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK4-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK4-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK4-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK4-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK4-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK4-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -1223,7 +1223,7 @@ // CHECK4-NEXT: [[TMP6:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i64 96, i64 16, ptr @.omp_task_entry.) 
// CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP6]], i32 0, i32 0 // CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP9]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP6]], i32 0, i32 1 // CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP7]], i32 0, i32 5 @@ -1234,7 +1234,7 @@ // CHECK4-NEXT: store i64 1, ptr [[TMP13]], align 8 // CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP7]], i32 0, i32 9 // CHECK4-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP14]], i8 0, i64 8, i1 false) -// CHECK4-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK4-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP6]], i32 1, ptr [[TMP11]], ptr [[TMP12]], i64 [[TMP15]], i32 1, i32 0, i64 0, ptr @.omp_task_dup.) // CHECK4-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK4-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP1]]) @@ -1299,93 +1299,93 @@ // CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK4-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK4-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK4-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK4-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK4-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK4-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // 
CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK4-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK4-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 +// CHECK4-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK4-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: [[TMP20:%.*]] = load ptr, ptr 
[[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 // CHECK4-NEXT: call void [[TMP20]](ptr [[TMP21]], ptr [[DOTLASTPRIV_PTR_ADDR_I]], ptr [[DOTLASTPRIV_PTR_ADDR1_I]]) #[[ATTR4:[0-9]+]] // CHECK4-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP19]], align 8 // CHECK4-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP19]], i32 0, i32 1 // CHECK4-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK4-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: [[TMP26:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !14 -// CHECK4-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK4-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: [[TMP26:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !15 +// CHECK4-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP27]] to i32 -// CHECK4-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK4-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK4: omp.inner.for.cond.i: -// CHECK4-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK4-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[CONV2_I:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK4-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 +// CHECK4-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP29]] // CHECK4-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK4: omp.inner.for.body.i: -// CHECK4-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 -// CHECK4-NEXT: store i32 [[TMP30]], ptr [[I_I]], align 4, !noalias !14 +// CHECK4-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: store i32 [[TMP30]], ptr [[I_I]], align 4, !noalias !15 // CHECK4-NEXT: store double 1.000000e+00, ptr [[TMP25]], align 8 // CHECK4-NEXT: store i32 11, ptr [[TMP26]], align 4 -// CHECK4-NEXT: store ptr @_NSConcreteStackBlock, ptr [[BLOCK_I]], align 8, !noalias !14 +// CHECK4-NEXT: store ptr @_NSConcreteStackBlock, ptr [[BLOCK_I]], align 8, !noalias !15 // CHECK4-NEXT: [[BLOCK_FLAGS_I:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK_I]], i32 0, i32 1 -// CHECK4-NEXT: store i32 1073741824, ptr [[BLOCK_FLAGS_I]], align 8, !noalias !14 +// CHECK4-NEXT: store i32 1073741824, ptr [[BLOCK_FLAGS_I]], align 8, !noalias !15 // CHECK4-NEXT: [[BLOCK_RESERVED_I:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK_I]], i32 0, i32 2 -// CHECK4-NEXT: store i32 0, ptr [[BLOCK_RESERVED_I]], align 4, !noalias !14 +// CHECK4-NEXT: store i32 0, ptr [[BLOCK_RESERVED_I]], align 4, !noalias !15 // CHECK4-NEXT: [[BLOCK_INVOKE_I:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK_I]], i32 0, i32 3 -// CHECK4-NEXT: store ptr @_block_invoke, ptr [[BLOCK_INVOKE_I]], align 8, !noalias !14 +// CHECK4-NEXT: 
store ptr @_block_invoke, ptr [[BLOCK_INVOKE_I]], align 8, !noalias !15 // CHECK4-NEXT: [[BLOCK_DESCRIPTOR_I:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK_I]], i32 0, i32 4 -// CHECK4-NEXT: store ptr @__block_descriptor_tmp.2, ptr [[BLOCK_DESCRIPTOR_I]], align 8, !noalias !14 +// CHECK4-NEXT: store ptr @__block_descriptor_tmp.2, ptr [[BLOCK_DESCRIPTOR_I]], align 8, !noalias !15 // CHECK4-NEXT: [[BLOCK_CAPTURED_I:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK_I]], i32 0, i32 5 -// CHECK4-NEXT: [[TMP31:%.*]] = load volatile double, ptr [[TMP25]], align 8 -// CHECK4-NEXT: store volatile double [[TMP31]], ptr [[BLOCK_CAPTURED_I]], align 8, !noalias !14 +// CHECK4-NEXT: [[TMP31:%.*]] = load volatile double, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: store volatile double [[TMP31]], ptr [[BLOCK_CAPTURED_I]], align 8, !noalias !15 // CHECK4-NEXT: [[BLOCK_CAPTURED3_I:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK_I]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP26]], align 4 -// CHECK4-NEXT: store i32 [[TMP32]], ptr [[BLOCK_CAPTURED3_I]], align 8, !noalias !14 +// CHECK4-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP26]], align 4, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: store i32 [[TMP32]], ptr [[BLOCK_CAPTURED3_I]], align 8, !noalias !15 // CHECK4-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], ptr [[BLOCK_I]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8, !noalias !14 +// CHECK4-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8, !noalias !15 // CHECK4-NEXT: call void [[TMP34]](ptr noundef [[BLOCK_I]]) #[[ATTR4]] -// CHECK4-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK4-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[ADD4_I:%.*]] = add nsw i32 [[TMP35]], 1 -// CHECK4-NEXT: store i32 [[ADD4_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK4-NEXT: store i32 [[ADD4_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND_I]] // CHECK4: omp.inner.for.end.i: -// CHECK4-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 +// CHECK4-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0 // CHECK4-NEXT: br i1 [[TMP37]], label [[DOTOMP_LASTPRIVATE_THEN_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK4: .omp.lastprivate.then.i: -// CHECK4-NEXT: [[TMP38:%.*]] = load double, ptr [[TMP25]], align 8 +// CHECK4-NEXT: [[TMP38:%.*]] = load double, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: store volatile double [[TMP38]], ptr [[TMP22]], align 8 -// CHECK4-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP26]], align 4 +// CHECK4-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP26]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: store i32 [[TMP39]], ptr [[TMP24]], align 4 // CHECK4-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK4: .omp_outlined..1.exit: @@ -1404,7 +1404,7 @@ // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 8 -// CHECK4-NEXT: 
[[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4 +// CHECK4-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 8 // CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK4-NEXT: ret void @@ -1419,7 +1419,7 @@ // CHECK5-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK5-NEXT: store ptr [[S]], ptr [[S_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK5-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1:[0-9]+]], i32 3, ptr @.omp_outlined., i64 [[TMP1]], ptr [[A_ADDR]], ptr [[S_ADDR]]) // CHECK5-NEXT: ret void @@ -1440,11 +1440,11 @@ // CHECK5-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK5-NEXT: store ptr [[S]], ptr [[S_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[S_ADDR]], align 8 // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP4]]) // CHECK5-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0 // CHECK5-NEXT: br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -1459,7 +1459,7 @@ // CHECK5-NEXT: [[TMP10:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i64 96, i64 24, ptr @.omp_task_entry.) // CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP10]], i32 0, i32 0 // CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP11]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 +// CHECK5-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP13]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) // CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP10]], i32 0, i32 1 // CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP11]], i32 0, i32 5 @@ -1470,7 +1470,7 @@ // CHECK5-NEXT: store i64 1, ptr [[TMP17]], align 8 // CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP11]], i32 0, i32 9 // CHECK5-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP18]], i8 0, i64 8, i1 false) -// CHECK5-NEXT: [[TMP19:%.*]] = load i64, ptr [[TMP17]], align 8 +// CHECK5-NEXT: [[TMP19:%.*]] = load i64, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP4]], ptr [[TMP10]], i32 1, ptr [[TMP15]], ptr [[TMP16]], i64 [[TMP19]], i32 1, i32 0, i64 0, ptr @.omp_task_dup.) 
// CHECK5-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP4]]) // CHECK5-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP4]]) @@ -1520,75 +1520,75 @@ // CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK5-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK5-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK5-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK5-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK5-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK5-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK5-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, 
!noalias !14 -// CHECK5-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK5-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: [[TMP20:%.*]] = load i64, ptr [[TMP19]], align 8 -// CHECK5-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 +// CHECK5-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK5-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK5-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: [[TMP20:%.*]] = load i64, ptr [[TMP19]], align 8, !noundef [[NOUNDEF3]] +// CHECK5-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 // CHECK5-NEXT: call void [[TMP21]](ptr [[TMP22]], ptr [[DOTLASTPRIV_PTR_ADDR_I]], ptr [[DOTLASTPRIV_PTR_ADDR1_I]]) #[[ATTR2:[0-9]+]] // CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP19]], i32 0, i32 1 // CHECK5-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 // CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP19]], i32 0, i32 2 // CHECK5-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8 -// CHECK5-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: [[TMP28:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !14 -// CHECK5-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK5-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: [[TMP28:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !15 +// CHECK5-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef 
[[NOUNDEF3]] // CHECK5-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP29]] to i32 -// CHECK5-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK5-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK5: omp.inner.for.cond.i: -// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[CONV2_I:%.*]] = sext i32 [[TMP30]] to i64 -// CHECK5-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 +// CHECK5-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP31]] // CHECK5-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK5: omp.inner.for.body.i: -// CHECK5-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 -// CHECK5-NEXT: store i32 [[TMP32]], ptr [[I_I]], align 4, !noalias !14 -// CHECK5-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK5-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] +// CHECK5-NEXT: store i32 [[TMP32]], ptr [[I_I]], align 4, !noalias !15 +// CHECK5-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[ADD3_I:%.*]] = add nsw i32 [[TMP33]], 1 -// CHECK5-NEXT: store i32 [[ADD3_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK5-NEXT: store i32 [[ADD3_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND_I]] // CHECK5: omp.inner.for.end.i: -// CHECK5-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 +// CHECK5-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 // CHECK5-NEXT: br i1 [[TMP35]], label [[DOTOMP_LASTPRIVATE_THEN_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK5: .omp.lastprivate.then.i: -// CHECK5-NEXT: [[TMP36:%.*]] = load ptr, ptr [[TMP27]], align 8 +// CHECK5-NEXT: [[TMP36:%.*]] = load ptr, ptr [[TMP27]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: store ptr [[TMP36]], ptr [[TMP24]], align 8 -// CHECK5-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP28]], align 8 +// CHECK5-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP28]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: store ptr [[TMP37]], ptr [[TMP26]], align 8 // CHECK5-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK5: .omp_outlined..1.exit: @@ -1607,7 +1607,7 @@ // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 8 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK5-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 8 // CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK5-NEXT: ret void @@ -1634,7 +1634,7 @@ // CHECK6-NEXT: store ptr [[I]], ptr [[I_ADDR]], align 8 // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr 
[[I_ADDR]], align 8 // CHECK6-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK6-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP2]]) // CHECK6-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0 // CHECK6-NEXT: br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -1645,7 +1645,7 @@ // CHECK6-NEXT: [[TMP6:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i64 88, i64 8, ptr @.omp_task_entry.) // CHECK6-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP6]], i32 0, i32 0 // CHECK6-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK6-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK6-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP9]], ptr align 8 [[AGG_CAPTURED]], i64 8, i1 false) // CHECK6-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP6]], i32 0, i32 1 // CHECK6-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP7]], i32 0, i32 5 @@ -1656,7 +1656,7 @@ // CHECK6-NEXT: store i64 1, ptr [[TMP13]], align 8 // CHECK6-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP7]], i32 0, i32 9 // CHECK6-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP14]], i8 0, i64 8, i1 false) -// CHECK6-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK6-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK6-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP2]], ptr [[TMP6]], i32 1, ptr [[TMP11]], ptr [[TMP12]], i64 [[TMP15]], i32 1, i32 0, i64 0, ptr @.omp_task_dup.) 
// CHECK6-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP2]]) // CHECK6-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP2]]) @@ -1700,69 +1700,69 @@ // CHECK6-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK6-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK6-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK6-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK6-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK6-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK6-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK6-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK6-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK6-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK6-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK6-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK6-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK6-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK6-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK6-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK6-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK6-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK6-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, 
!noalias !14 -// CHECK6-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK6-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 +// CHECK6-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK6-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK6-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK6-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 // CHECK6-NEXT: call void [[TMP20]](ptr [[TMP21]], ptr [[DOTLASTPRIV_PTR_ADDR_I]]) #[[ATTR2:[0-9]+]] // CHECK6-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP19]], align 8 -// CHECK6-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK6-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP24]] to i32 -// CHECK6-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK6-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK6: omp.inner.for.cond.i: -// CHECK6-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK6-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[CONV1_I:%.*]] = sext i32 [[TMP25]] to i64 -// CHECK6-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, 
!noalias !14 +// CHECK6-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV1_I]], [[TMP26]] // CHECK6-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK6: omp.inner.for.body.i: -// CHECK6-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 -// CHECK6-NEXT: store i32 [[TMP27]], ptr [[I_I]], align 4, !noalias !14 -// CHECK6-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK6-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] +// CHECK6-NEXT: store i32 [[TMP27]], ptr [[I_I]], align 4, !noalias !15 +// CHECK6-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[ADD2_I:%.*]] = add nsw i32 [[TMP28]], 1 -// CHECK6-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK6-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND_I]] // CHECK6: omp.inner.for.end.i: -// CHECK6-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 +// CHECK6-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK6-NEXT: br i1 [[TMP30]], label [[DOTOMP_LASTPRIVATE_THEN_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK6: .omp.lastprivate.then.i: -// CHECK6-NEXT: store i32 10, ptr [[I_I]], align 4, !noalias !14 -// CHECK6-NEXT: [[TMP31:%.*]] = load i32, ptr [[I_I]], align 4, !noalias !14 +// CHECK6-NEXT: store i32 10, ptr [[I_I]], align 4, !noalias !15 +// CHECK6-NEXT: [[TMP31:%.*]] = load i32, ptr [[I_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK6-NEXT: store i32 [[TMP31]], ptr [[TMP22]], align 4 // CHECK6-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK6: .omp_outlined..1.exit: @@ -1781,7 +1781,7 @@ // CHECK6-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK6-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK6-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 8 -// CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4 +// CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK6-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 8 // CHECK6-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK6-NEXT: ret void diff --git a/clang/test/OpenMP/parallel_master_taskloop_simd_codegen.cpp b/clang/test/OpenMP/parallel_master_taskloop_simd_codegen.cpp --- a/clang/test/OpenMP/parallel_master_taskloop_simd_codegen.cpp +++ b/clang/test/OpenMP/parallel_master_taskloop_simd_codegen.cpp @@ -74,33 +74,33 @@ // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK1-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined., i64 [[TMP3]]) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR__CASTED2]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED2]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..2, i64 [[TMP6]]) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_3]], align 1 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK1-NEXT: [[FROMBOOL7:%.*]] = zext i1 [[TOBOOL5]] to i8 // CHECK1-NEXT: store i8 [[FROMBOOL7]], ptr [[DOTCAPTURE_EXPR__CASTED6]], align 1 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED6]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR__CASTED8]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED8]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED8]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL9:%.*]] = trunc i8 [[TMP13]] to i1 // CHECK1-NEXT: br i1 [[TOBOOL9]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -130,13 +130,13 @@ // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: 
[[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK1-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK1-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] // CHECK1: omp_if.then: // CHECK1-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 33, i64 80, i64 1, ptr @.omp_task_entry.) // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP5]], i32 0, i32 0 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP6]], i32 0, i32 4 @@ -149,7 +149,7 @@ // CHECK1-NEXT: store i64 1, ptr [[TMP10]], align 8 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP6]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP11]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP5]], i32 1, ptr [[TMP8]], ptr [[TMP9]], i64 [[TMP12]], i32 1, i32 0, i64 0, ptr null) // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP1]]) @@ -178,56 +178,56 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// 
CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // 
CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP19]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV1_I:%.*]] = sext i32 [[TMP20]] to i64 -// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV1_I]], [[TMP21]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 -// CHECK1-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !14 -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !15 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD2_I:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK1-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK1: .omp_outlined..1.exit: // CHECK1-NEXT: ret i32 0 // @@ -244,12 +244,12 @@ // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK1-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK1-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] // CHECK1: omp_if.then: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i64 80, i64 1, ptr @.omp_task_entry..4) // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP5]], i32 0, i32 0 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP6]], i32 0, i32 5 @@ -260,7 +260,7 @@ // CHECK1-NEXT: store i64 1, ptr [[TMP9]], align 8 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP6]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, 
ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP12:%.*]] = zext i32 [[TMP4]] to i64 // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP5]], i32 1, ptr [[TMP7]], ptr [[TMP8]], i64 [[TMP11]], i32 1, i32 1, i64 [[TMP12]], ptr null) // CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP1]]) @@ -289,56 +289,56 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META27:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META29:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !31 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !31 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !31 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !31 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !31 -// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !31 -// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !31 -// CHECK1-NEXT: 
store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !31 -// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !31 -// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !31 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !31 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !31 -// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !31 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META28:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !32 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !32 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 +// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !32 +// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !32 +// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !32 +// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !32 +// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !32 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 +// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !32, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP19]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !31 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !32 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !31, !llvm.access.group [[ACC_GRP32:![0-9]+]] +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33:![0-9]+]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV1_I:%.*]] = sext i32 [[TMP20]] to i64 -// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !31, !llvm.access.group [[ACC_GRP32]] +// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !32, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV1_I]], [[TMP21]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !31, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !31, !llvm.access.group 
[[ACC_GRP32]] -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !31, !llvm.access.group [[ACC_GRP32]] +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]] +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD2_I:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK1-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !31, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP33:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP34:![0-9]+]] // CHECK1: .omp_outlined..3.exit: // CHECK1-NEXT: ret i32 0 // @@ -371,7 +371,7 @@ // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP4]]) // CHECK1-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0 // CHECK1-NEXT: br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -382,29 +382,29 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[AGG_CAPTURED]], i32 0, i32 2 // CHECK1-NEXT: store ptr [[TMP2]], ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP12]], ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP13]], i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP16]] to i64 // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP15]], i64 [[IDXPROM7]] -// 
CHECK1-NEXT: [[TMP17:%.*]] = load i8, ptr [[ARRAYIDX8]], align 1 +// CHECK1-NEXT: [[TMP17:%.*]] = load i8, ptr [[ARRAYIDX8]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP17]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTCAPTURE_EXPR_6]], align 4 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP18]], 0 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK1-NEXT: [[CONV10:%.*]] = sext i32 [[DIV]] to i64 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB11:%.*]] = sub i32 [[TMP19]], [[TMP20]] // CHECK1-NEXT: [[SUB12:%.*]] = sub i32 [[SUB11]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB12]], 1 @@ -416,22 +416,22 @@ // CHECK1-NEXT: [[TMP21:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i64 88, i64 24, ptr @.omp_task_entry..7) // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP21]], i32 0, i32 0 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP22]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP24]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3]], ptr [[TMP21]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP26:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK1-NEXT: [[TMP26:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP26]] to i1 // CHECK1-NEXT: [[TMP27:%.*]] = sext i1 [[TOBOOL]] to i32 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP22]], i32 0, i32 5 // CHECK1-NEXT: store i64 0, ptr [[TMP28]], align 8 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP22]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_9]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_9]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i64 [[TMP30]], ptr [[TMP29]], align 8 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP22]], i32 0, i32 7 // CHECK1-NEXT: store i64 1, ptr [[TMP31]], align 8 // CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP22]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP32]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[TMP31]], align 8 +// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[TMP31]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP34:%.*]] = zext i32 [[TMP10]] to i64 // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP4]], ptr [[TMP21]], i32 [[TMP27]], ptr [[TMP28]], ptr [[TMP29]], i64 [[TMP33]], i32 1, i32 2, i64 [[TMP34]], ptr @.omp_task_dup.) 
// CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP4]]) @@ -483,126 +483,126 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3]], ptr [[TMP3]], i32 0, i32 1 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META36:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META39:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META41:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META43:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META45:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !47 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !47 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !47 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !47 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !47 -// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !47 -// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !47 -// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !47 -// CHECK1-NEXT: store i32 [[TMP16]], ptr 
[[DOTLITER__ADDR_I]], align 4, !noalias !47 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !47 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !47 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !47 -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !47 -// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !47 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META37:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META40:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META42:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META44:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META46:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !48 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !48 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !48 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !48 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !48 +// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !48 +// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !48 +// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !48 +// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !48 +// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !48 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !48 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !48 +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !48 +// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !48 // CHECK1-NEXT: call void [[TMP20]](ptr [[TMP21]], ptr [[DOTLASTPRIV_PTR_ADDR_I]]) #[[ATTR2]] // CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !47 +// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !48 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON_2:%.*]], ptr [[TMP19]], i32 0, i32 1 // CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 -// CHECK1-NEXT: store i32 [[TMP26]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !47 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP26]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !48 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 1 // CHECK1-NEXT: [[TMP28:%.*]] = load ptr, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4 -// CHECK1-NEXT: store i32 [[TMP29]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4, !noundef 
[[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP29]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 2 // CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP31]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP31]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 1 // CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4 +// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM_I:%.*]] = sext i32 [[TMP35]] to i64 // CHECK1-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP32]], i64 [[IDXPROM_I]] -// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[ARRAYIDX_I]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[ARRAYIDX_I]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 1 // CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP37]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP38]], align 4 +// CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP38]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM4_I:%.*]] = sext i32 [[TMP39]] to i64 // CHECK1-NEXT: [[ARRAYIDX5_I:%.*]] = getelementptr inbounds i8, ptr [[TMP36]], i64 [[IDXPROM4_I]] -// CHECK1-NEXT: [[TMP40:%.*]] = load i8, ptr [[ARRAYIDX5_I]], align 1 +// CHECK1-NEXT: [[TMP40:%.*]] = load i8, ptr [[ARRAYIDX5_I]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = sext i8 [[TMP40]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47 -// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !47 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV7_I:%.*]] = sext i32 [[TMP41]] to i64 -// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47 -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB8_I:%.*]] = sub i32 [[TMP42]], [[TMP43]] // CHECK1-NEXT: [[SUB9_I:%.*]] = sub i32 [[SUB8_I]], 1 // CHECK1-NEXT: [[CONV11_I:%.*]] = zext i32 [[SUB8_I]] to i64 // CHECK1-NEXT: [[MUL_I:%.*]] = mul nsw i64 [[CONV7_I]], [[CONV11_I]] // CHECK1-NEXT: [[SUB12_I:%.*]] = sub nsw i64 [[MUL_I]], 1 -// CHECK1-NEXT: store i64 [[SUB12_I]], ptr [[DOTCAPTURE_EXPR_6_I]], align 8, !noalias !47 -// CHECK1-NEXT: store i32 0, ptr [[I_I]], align 4, !noalias !47 -// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 -// CHECK1-NEXT: store i32 [[TMP44]], ptr [[J_I]], align 4, !noalias !47 -// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !47 +// CHECK1-NEXT: store i64 [[SUB12_I]], ptr [[DOTCAPTURE_EXPR_6_I]], align 8, !noalias !48 +// CHECK1-NEXT: store i32 0, ptr [[I_I]], align 4, !noalias !48 +// CHECK1-NEXT: 
[[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP44]], ptr [[J_I]], align 4, !noalias !48 +// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp slt i32 0, [[TMP45]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[LAND_LHS_TRUE_I:%.*]], label [[TASKLOOP_IF_END_I:%.*]] // CHECK1: land.lhs.true.i: -// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 -// CHECK1-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47 +// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP13_I:%.*]] = icmp slt i32 [[TMP46]], [[TMP47]] // CHECK1-NEXT: br i1 [[CMP13_I]], label [[TASKLOOP_IF_THEN_I:%.*]], label [[TASKLOOP_IF_END_I]] // CHECK1: taskloop.if.then.i: -// CHECK1-NEXT: [[TMP48:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !47 -// CHECK1-NEXT: store i64 [[TMP48]], ptr [[DOTOMP_IV_I]], align 8, !noalias !47 +// CHECK1-NEXT: [[TMP48:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i64 [[TMP48]], ptr [[DOTOMP_IV_I]], align 8, !noalias !48 // CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 1 // CHECK1-NEXT: [[TMP50:%.*]] = load ptr, ptr [[TMP49]], align 8 // CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 2 // CHECK1-NEXT: [[TMP52:%.*]] = load ptr, ptr [[TMP51]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP53:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48:![0-9]+]] -// CHECK1-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK1-NEXT: [[TMP53:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP16_I:%.*]] = icmp ule i64 [[TMP53]], [[TMP54]] // CHECK1-NEXT: br i1 [[CMP16_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB17_I:%.*]] = sub i32 [[TMP56]], [[TMP57]] // CHECK1-NEXT: [[SUB18_I:%.*]] = sub i32 
[[SUB17_I]], 1 // CHECK1-NEXT: [[CONV22_I:%.*]] = zext i32 [[SUB17_I]] to i64 // CHECK1-NEXT: [[DIV23_I:%.*]] = sdiv i64 [[TMP55]], [[CONV22_I]] // CHECK1-NEXT: [[CONV26_I:%.*]] = trunc i64 [[DIV23_I]] to i32 -// CHECK1-NEXT: store i32 [[CONV26_I]], ptr [[I14_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK1-NEXT: store i32 [[CONV26_I]], ptr [[I14_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]] +// CHECK1-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV27_I:%.*]] = sext i32 [[TMP58]] to i64 -// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: [[TMP61:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: [[TMP62:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP61:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP62:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB28_I:%.*]] = sub i32 [[TMP61]], [[TMP62]] // CHECK1-NEXT: [[SUB29_I:%.*]] = sub i32 [[SUB28_I]], 1 // CHECK1-NEXT: [[CONV33_I:%.*]] = zext i32 [[SUB28_I]] to i64 // CHECK1-NEXT: [[DIV34_I:%.*]] = sdiv i64 [[TMP60]], [[CONV33_I]] -// CHECK1-NEXT: [[TMP63:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: [[TMP64:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK1-NEXT: [[TMP63:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP64:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB35_I:%.*]] = sub i32 [[TMP63]], [[TMP64]] // CHECK1-NEXT: [[SUB36_I:%.*]] = sub i32 [[SUB35_I]], 1 // CHECK1-NEXT: [[CONV40_I:%.*]] = zext i32 [[SUB35_I]] to i64 @@ -610,15 +610,15 @@ // CHECK1-NEXT: [[SUB42_I:%.*]] = sub nsw i64 [[TMP59]], [[MUL41_I]] // CHECK1-NEXT: [[ADD44_I:%.*]] = add nsw i64 [[CONV27_I]], [[SUB42_I]] // CHECK1-NEXT: [[CONV45_I:%.*]] = trunc i64 [[ADD44_I]] to i32 -// CHECK1-NEXT: store i32 [[CONV45_I]], ptr [[J15_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: [[TMP65:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK1-NEXT: store i32 [[CONV45_I]], ptr [[J15_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]] +// CHECK1-NEXT: [[TMP65:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef 
[[NOUNDEF3]] // CHECK1-NEXT: [[ADD46_I:%.*]] = add nsw i64 [[TMP65]], 1 -// CHECK1-NEXT: store i64 [[ADD46_I]], ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP49:![0-9]+]] +// CHECK1-NEXT: store i64 [[ADD46_I]], ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP50:![0-9]+]] // CHECK1: omp.inner.for.end.i: // CHECK1-NEXT: br label [[TASKLOOP_IF_END_I]] // CHECK1: taskloop.if.end.i: -// CHECK1-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !47 +// CHECK1-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP67:%.*]] = icmp ne i32 [[TMP66]], 0 // CHECK1-NEXT: br i1 [[TMP67]], label [[DOTOMP_LASTPRIVATE_THEN_I:%.*]], label [[DOTOMP_OUTLINED__6_EXIT:%.*]] // CHECK1: .omp.lastprivate.then.i: @@ -639,7 +639,7 @@ // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3]], ptr [[TMP3]], i32 0, i32 1 // CHECK1-NEXT: ret void @@ -660,7 +660,7 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @_ZN1SC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK1-NEXT: ret void // @@ -675,15 +675,15 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 // CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP1]] to i1 // CHECK1-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[TOBOOL2]] to i8 // CHECK1-NEXT: store i8 [[FROMBOOL3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 3, ptr @.omp_outlined..8, ptr [[THIS1]], ptr [[C_ADDR]], i64 [[TMP2]]) // CHECK1-NEXT: ret void // @@ -706,10 +706,10 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP3]]) // CHECK1-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0 // CHECK1-NEXT: br i1 [[TMP5]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -719,12 +719,12 @@ // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], ptr [[AGG_CAPTURED]], i32 0, i32 1 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8 // CHECK1-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1 // CHECK1-NEXT: store ptr [[TMP]], ptr [[_TMP1]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP9]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP10]], 0 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK1-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1 @@ -734,19 +734,19 @@ // CHECK1-NEXT: [[TMP13:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP3]], i32 [[TMP12]], i64 80, i64 16, ptr @.omp_task_entry..10) // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP13]], i32 0, i32 0 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP14]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 5 // CHECK1-NEXT: store i64 0, ptr [[TMP17]], align 8 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[TMP19]] to i64 // CHECK1-NEXT: store i64 [[CONV]], ptr [[TMP18]], align 8 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 7 // CHECK1-NEXT: store i64 1, ptr 
[[TMP20]], align 8 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP21]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP20]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP20]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP3]], ptr [[TMP13]], i32 1, ptr [[TMP17]], ptr [[TMP18]], i64 [[TMP22]], i32 1, i32 2, i64 4, ptr null) // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP3]]) // CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP3]]) @@ -782,76 +782,76 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META51:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META54:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META56:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META58:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META60:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !62 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !62 -// 
CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !62 -// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8 -// CHECK1-NEXT: store ptr [[TMP_I]], ptr [[TMP1_I]], align 8, !noalias !62 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META52:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META55:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META57:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META59:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META61:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !63 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !63 +// CHECK1-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP_I]], ptr [[TMP1_I]], align 8, !noalias !63 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_ANON_4:%.*]], ptr [[TMP18]], i32 0, i32 1 // CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 -// CHECK1-NEXT: store i32 [[TMP22]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !62 -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !62 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP22]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !63 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !63, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[SUB3_I:%.*]] = sub nsw i32 [[TMP23]], 1 -// CHECK1-NEXT: store i32 [[SUB3_I]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !62 -// CHECK1-NEXT: store ptr 
[[A_I]], ptr [[TMP4_I]], align 8, !noalias !62 -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP4_I]], align 8, !noalias !62 +// CHECK1-NEXT: store i32 [[SUB3_I]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !63 +// CHECK1-NEXT: store ptr [[A_I]], ptr [[TMP4_I]], align 8, !noalias !63 +// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP4_I]], align 8, !noalias !63 // CHECK1-NEXT: store i32 0, ptr [[TMP24]], align 4 -// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !62 +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !63, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp slt i32 0, [[TMP25]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[TASKLOOP_IF_THEN_I:%.*]], label [[DOTOMP_OUTLINED__9_EXIT:%.*]] // CHECK1: taskloop.if.then.i: -// CHECK1-NEXT: store ptr [[A5_I]], ptr [[TMP6_I]], align 8, !noalias !62 -// CHECK1-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !62 +// CHECK1-NEXT: store ptr [[A5_I]], ptr [[TMP6_I]], align 8, !noalias !63 +// CHECK1-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !63, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP26]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !62 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !63 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], ptr [[TMP18]], i32 0, i32 1 // CHECK1-NEXT: [[TMP28:%.*]] = load ptr, ptr [[TMP27]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !62, !llvm.access.group [[ACC_GRP63:![0-9]+]] +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !63, !llvm.access.group [[ACC_GRP64:![0-9]+]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV7_I:%.*]] = sext i32 [[TMP29]] to i64 -// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !62, !llvm.access.group [[ACC_GRP63]] +// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !63, !llvm.access.group [[ACC_GRP64]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP8_I:%.*]] = icmp ule i64 [[CONV7_I]], [[TMP30]] // CHECK1-NEXT: br i1 [[CMP8_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !62, !llvm.access.group [[ACC_GRP63]] -// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP6_I]], align 8, !noalias !62, !llvm.access.group [[ACC_GRP63]] -// CHECK1-NEXT: store i32 [[TMP31]], ptr [[TMP32]], align 4, !llvm.access.group [[ACC_GRP63]] -// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !62, !llvm.access.group [[ACC_GRP63]] +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !63, !llvm.access.group [[ACC_GRP64]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP6_I]], align 8, !noalias !63, !llvm.access.group [[ACC_GRP64]] +// CHECK1-NEXT: store i32 [[TMP31]], ptr [[TMP32]], align 4, !llvm.access.group [[ACC_GRP64]] +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !63, !llvm.access.group [[ACC_GRP64]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD9_I:%.*]] = add nsw i32 [[TMP33]], 1 -// CHECK1-NEXT: store i32 [[ADD9_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !62, 
!llvm.access.group [[ACC_GRP63]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP64:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD9_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !63, !llvm.access.group [[ACC_GRP64]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP65:![0-9]+]] // CHECK1: omp.inner.for.end.i: // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__9_EXIT]] // CHECK1: .omp_outlined..9.exit: @@ -886,33 +886,33 @@ // CHECK2-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK2-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK2-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK2-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK2-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined., i64 [[TMP3]]) -// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR__CASTED2]], align 4 -// CHECK2-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED2]], align 8 +// CHECK2-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED2]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..2, i64 [[TMP6]]) -// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK2-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK2-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_3]], align 1 -// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store i32 [[TMP8]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK2-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1 +// CHECK2-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK2-NEXT: [[FROMBOOL7:%.*]] = zext i1 [[TOBOOL5]] to i8 // CHECK2-NEXT: store i8 [[FROMBOOL7]], ptr [[DOTCAPTURE_EXPR__CASTED6]], align 1 -// CHECK2-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED6]], align 8 -// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK2-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR__CASTED8]], align 4 -// CHECK2-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED8]], align 8 -// CHECK2-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1 +// CHECK2-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED8]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TOBOOL9:%.*]] = trunc i8 [[TMP13]] to i1 // CHECK2-NEXT: br i1 [[TOBOOL9]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK2: omp_if.then: @@ -942,13 +942,13 @@ // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK2-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK2-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] // CHECK2: omp_if.then: // CHECK2-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP5:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 33, i64 80, i64 1, ptr @.omp_task_entry.) 
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP5]], i32 0, i32 0 // CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP6]], i32 0, i32 4 @@ -961,7 +961,7 @@ // CHECK2-NEXT: store i64 1, ptr [[TMP10]], align 8 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP6]], i32 0, i32 9 // CHECK2-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP11]], i8 0, i64 8, i1 false) -// CHECK2-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK2-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP5]], i32 1, ptr [[TMP8]], ptr [[TMP9]], i64 [[TMP12]], i32 1, i32 0, i64 0, ptr null) // CHECK2-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK2-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP1]]) @@ -990,56 +990,56 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK2-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK2-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK2-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK2-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK2-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata 
[[META12:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK2-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK2-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK2-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !14 -// CHECK2-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK2-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK2-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK2-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK2-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK2-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK2-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK2-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK2-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK2-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK2-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK2-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP19]] to i32 -// CHECK2-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK2-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK2: omp.inner.for.cond.i: -// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CONV1_I:%.*]] = sext i32 [[TMP20]] to i64 -// CHECK2-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 +// CHECK2-NEXT: [[TMP21:%.*]] = load i64, ptr 
[[DOTUB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV1_I]], [[TMP21]] // CHECK2-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK2: omp.inner.for.body.i: -// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 -// CHECK2-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !14 -// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !15 +// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[ADD2_I:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK2-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 -// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK2-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 +// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK2: .omp_outlined..1.exit: // CHECK2-NEXT: ret i32 0 // @@ -1056,12 +1056,12 @@ // CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK2-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK2-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] // CHECK2: omp_if.then: -// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP5:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i64 80, i64 1, ptr @.omp_task_entry..4) // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP5]], i32 0, i32 0 // CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP6]], i32 0, i32 5 @@ -1072,7 +1072,7 @@ // CHECK2-NEXT: store i64 1, ptr [[TMP9]], align 8 // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP6]], i32 0, i32 9 // CHECK2-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 8, i1 false) -// CHECK2-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK2-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP12:%.*]] = zext i32 [[TMP4]] to i64 // CHECK2-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP5]], i32 1, ptr [[TMP7]], ptr [[TMP8]], i64 [[TMP11]], i32 1, i32 1, i64 [[TMP12]], ptr null) // CHECK2-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP1]]) @@ -1101,56 +1101,56 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, 
!noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK2-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK2-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK2-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK2-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK2-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META27:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META29:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !31 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !31 -// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !31 -// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !31 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !31 -// CHECK2-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !31 -// CHECK2-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !31 -// CHECK2-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !31 -// CHECK2-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !31 -// CHECK2-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !31 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !31 -// CHECK2-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !31 -// CHECK2-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !31 +// CHECK2-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) 
+// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META28:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 +// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !32 +// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !32 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 +// CHECK2-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !32 +// CHECK2-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !32 +// CHECK2-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !32 +// CHECK2-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !32 +// CHECK2-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !32 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 +// CHECK2-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 +// CHECK2-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !32, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP19]] to i32 -// CHECK2-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !31 +// CHECK2-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !32 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK2: omp.inner.for.cond.i: -// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !31, !llvm.access.group [[ACC_GRP32:![0-9]+]] +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33:![0-9]+]], !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CONV1_I:%.*]] = sext i32 [[TMP20]] to i64 -// CHECK2-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !31, !llvm.access.group [[ACC_GRP32]] +// CHECK2-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !32, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV1_I]], [[TMP21]] // CHECK2-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK2: omp.inner.for.body.i: -// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !31, !llvm.access.group [[ACC_GRP32]] -// CHECK2-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !31, !llvm.access.group [[ACC_GRP32]] -// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !31, !llvm.access.group [[ACC_GRP32]] +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF3]] +// CHECK2-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]] +// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[ADD2_I:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK2-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !31, !llvm.access.group 
[[ACC_GRP32]] -// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP33:![0-9]+]] +// CHECK2-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]] +// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP34:![0-9]+]] // CHECK2: .omp_outlined..3.exit: // CHECK2-NEXT: ret i32 0 // @@ -1183,7 +1183,7 @@ // CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP4]]) // CHECK2-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0 // CHECK2-NEXT: br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -1194,29 +1194,29 @@ // CHECK2-NEXT: store ptr [[TMP1]], ptr [[TMP8]], align 8 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[AGG_CAPTURED]], i32 0, i32 2 // CHECK2-NEXT: store ptr [[TMP2]], ptr [[TMP9]], align 8 -// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store i32 [[TMP12]], ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP2]], align 8 -// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP13]], i64 [[IDXPROM]] -// CHECK2-NEXT: [[TMP15:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 -// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK2-NEXT: [[TMP15:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP16]] to i64 // CHECK2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP15]], i64 [[IDXPROM7]] -// CHECK2-NEXT: [[TMP17:%.*]] = load i8, ptr [[ARRAYIDX8]], align 1 +// CHECK2-NEXT: [[TMP17:%.*]] = load i8, ptr [[ARRAYIDX8]], align 1, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CONV:%.*]] = sext i8 [[TMP17]] to i32 // CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTCAPTURE_EXPR_6]], align 4 -// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP18]], 0 // CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK2-NEXT: [[CONV10:%.*]] = sext i32 [[DIV]] to i64 -// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_6]], align 4 -// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4 +// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[SUB11:%.*]] = sub i32 [[TMP19]], [[TMP20]] // CHECK2-NEXT: [[SUB12:%.*]] = sub i32 [[SUB11]], 1 // CHECK2-NEXT: [[ADD:%.*]] = add i32 [[SUB12]], 1 @@ -1228,22 +1228,22 @@ // CHECK2-NEXT: [[TMP21:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i64 88, i64 24, ptr @.omp_task_entry..7) // CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP21]], i32 0, i32 0 // CHECK2-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP22]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 +// CHECK2-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP24]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) // CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3]], ptr [[TMP21]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP26:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK2-NEXT: [[TMP26:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP26]] to i1 // CHECK2-NEXT: [[TMP27:%.*]] = sext i1 [[TOBOOL]] to i32 // CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP22]], i32 0, i32 5 // CHECK2-NEXT: store i64 0, ptr [[TMP28]], align 8 // CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP22]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_9]], align 8 +// CHECK2-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_9]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store i64 [[TMP30]], ptr [[TMP29]], align 8 // CHECK2-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP22]], i32 0, i32 7 // CHECK2-NEXT: store i64 1, ptr [[TMP31]], align 8 // CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP22]], i32 0, i32 9 // CHECK2-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP32]], i8 0, i64 8, i1 false) -// CHECK2-NEXT: [[TMP33:%.*]] = load i64, ptr [[TMP31]], align 8 +// CHECK2-NEXT: [[TMP33:%.*]] = load i64, ptr [[TMP31]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP34:%.*]] = zext i32 [[TMP10]] to i64 // CHECK2-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP4]], ptr [[TMP21]], i32 [[TMP27]], ptr [[TMP28]], ptr [[TMP29]], i64 [[TMP33]], i32 1, i32 2, i64 [[TMP34]], ptr @.omp_task_dup.) 
// CHECK2-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP4]]) @@ -1295,126 +1295,126 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3]], ptr [[TMP3]], i32 0, i32 1 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK2-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK2-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK2-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK2-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK2-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META36:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META39:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META41:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META43:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META45:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !47 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !47 -// CHECK2-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !47 -// CHECK2-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !47 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !47 -// CHECK2-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !47 -// CHECK2-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !47 -// CHECK2-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !47 -// CHECK2-NEXT: store i32 [[TMP16]], ptr 
[[DOTLITER__ADDR_I]], align 4, !noalias !47 -// CHECK2-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !47 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !47 -// CHECK2-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !47 -// CHECK2-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !47 -// CHECK2-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !47 +// CHECK2-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META37:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META40:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META42:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META44:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META46:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !48 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !48 +// CHECK2-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !48 +// CHECK2-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !48 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !48 +// CHECK2-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !48 +// CHECK2-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !48 +// CHECK2-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !48 +// CHECK2-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !48 +// CHECK2-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !48 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !48 +// CHECK2-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !48 +// CHECK2-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !48 +// CHECK2-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !48 // CHECK2-NEXT: call void [[TMP20]](ptr [[TMP21]], ptr [[DOTLASTPRIV_PTR_ADDR_I]]) #[[ATTR2]] // CHECK2-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP19]], align 8 -// CHECK2-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !47 +// CHECK2-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !48 // CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON_2:%.*]], ptr [[TMP19]], i32 0, i32 1 // CHECK2-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK2-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 -// CHECK2-NEXT: store i32 [[TMP26]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !47 +// CHECK2-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: store i32 [[TMP26]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !48 // CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 1 // CHECK2-NEXT: [[TMP28:%.*]] = load ptr, ptr [[TMP27]], align 8 -// CHECK2-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4 -// CHECK2-NEXT: store i32 [[TMP29]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 +// CHECK2-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4, !noundef 
[[NOUNDEF3]] +// CHECK2-NEXT: store i32 [[TMP29]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48 // CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 2 // CHECK2-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 -// CHECK2-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP31]], align 8 +// CHECK2-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP31]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 1 // CHECK2-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8 -// CHECK2-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4 +// CHECK2-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[IDXPROM_I:%.*]] = sext i32 [[TMP35]] to i64 // CHECK2-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP32]], i64 [[IDXPROM_I]] -// CHECK2-NEXT: [[TMP36:%.*]] = load ptr, ptr [[ARRAYIDX_I]], align 8 +// CHECK2-NEXT: [[TMP36:%.*]] = load ptr, ptr [[ARRAYIDX_I]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 1 // CHECK2-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP37]], align 8 -// CHECK2-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP38]], align 4 +// CHECK2-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP38]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[IDXPROM4_I:%.*]] = sext i32 [[TMP39]] to i64 // CHECK2-NEXT: [[ARRAYIDX5_I:%.*]] = getelementptr inbounds i8, ptr [[TMP36]], i64 [[IDXPROM4_I]] -// CHECK2-NEXT: [[TMP40:%.*]] = load i8, ptr [[ARRAYIDX5_I]], align 1 +// CHECK2-NEXT: [[TMP40:%.*]] = load i8, ptr [[ARRAYIDX5_I]], align 1, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CONV_I:%.*]] = sext i8 [[TMP40]] to i32 -// CHECK2-NEXT: store i32 [[CONV_I]], ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47 -// CHECK2-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !47 +// CHECK2-NEXT: store i32 [[CONV_I]], ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48 +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CONV7_I:%.*]] = sext i32 [[TMP41]] to i64 -// CHECK2-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47 -// CHECK2-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 +// CHECK2-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[SUB8_I:%.*]] = sub i32 [[TMP42]], [[TMP43]] // CHECK2-NEXT: [[SUB9_I:%.*]] = sub i32 [[SUB8_I]], 1 // CHECK2-NEXT: [[CONV11_I:%.*]] = zext i32 [[SUB8_I]] to i64 // CHECK2-NEXT: [[MUL_I:%.*]] = mul nsw i64 [[CONV7_I]], [[CONV11_I]] // CHECK2-NEXT: [[SUB12_I:%.*]] = sub nsw i64 [[MUL_I]], 1 -// CHECK2-NEXT: store i64 [[SUB12_I]], ptr [[DOTCAPTURE_EXPR_6_I]], align 8, !noalias !47 -// CHECK2-NEXT: store i32 0, ptr [[I_I]], align 4, !noalias !47 -// CHECK2-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 -// CHECK2-NEXT: store i32 [[TMP44]], ptr [[J_I]], align 4, !noalias !47 -// CHECK2-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !47 +// CHECK2-NEXT: store i64 [[SUB12_I]], ptr [[DOTCAPTURE_EXPR_6_I]], align 8, !noalias !48 +// CHECK2-NEXT: store i32 0, ptr [[I_I]], align 4, !noalias !48 +// CHECK2-NEXT: 
[[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: store i32 [[TMP44]], ptr [[J_I]], align 4, !noalias !48 +// CHECK2-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CMP_I:%.*]] = icmp slt i32 0, [[TMP45]] // CHECK2-NEXT: br i1 [[CMP_I]], label [[LAND_LHS_TRUE_I:%.*]], label [[TASKLOOP_IF_END_I:%.*]] // CHECK2: land.lhs.true.i: -// CHECK2-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 -// CHECK2-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47 +// CHECK2-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CMP13_I:%.*]] = icmp slt i32 [[TMP46]], [[TMP47]] // CHECK2-NEXT: br i1 [[CMP13_I]], label [[TASKLOOP_IF_THEN_I:%.*]], label [[TASKLOOP_IF_END_I]] // CHECK2: taskloop.if.then.i: -// CHECK2-NEXT: [[TMP48:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !47 -// CHECK2-NEXT: store i64 [[TMP48]], ptr [[DOTOMP_IV_I]], align 8, !noalias !47 +// CHECK2-NEXT: [[TMP48:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: store i64 [[TMP48]], ptr [[DOTOMP_IV_I]], align 8, !noalias !48 // CHECK2-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 1 // CHECK2-NEXT: [[TMP50:%.*]] = load ptr, ptr [[TMP49]], align 8 // CHECK2-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 2 // CHECK2-NEXT: [[TMP52:%.*]] = load ptr, ptr [[TMP51]], align 8 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK2: omp.inner.for.cond.i: -// CHECK2-NEXT: [[TMP53:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48:![0-9]+]] -// CHECK2-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK2-NEXT: [[TMP53:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CMP16_I:%.*]] = icmp ule i64 [[TMP53]], [[TMP54]] // CHECK2-NEXT: br i1 [[CMP16_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK2: omp.inner.for.body.i: -// CHECK2-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK2-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK2-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK2-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[SUB17_I:%.*]] = sub i32 [[TMP56]], [[TMP57]] // CHECK2-NEXT: [[SUB18_I:%.*]] = sub i32 
[[SUB17_I]], 1 // CHECK2-NEXT: [[CONV22_I:%.*]] = zext i32 [[SUB17_I]] to i64 // CHECK2-NEXT: [[DIV23_I:%.*]] = sdiv i64 [[TMP55]], [[CONV22_I]] // CHECK2-NEXT: [[CONV26_I:%.*]] = trunc i64 [[DIV23_I]] to i32 -// CHECK2-NEXT: store i32 [[CONV26_I]], ptr [[I14_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK2-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK2-NEXT: store i32 [[CONV26_I]], ptr [[I14_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]] +// CHECK2-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CONV27_I:%.*]] = sext i32 [[TMP58]] to i64 -// CHECK2-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK2-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK2-NEXT: [[TMP61:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK2-NEXT: [[TMP62:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK2-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP61:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP62:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[SUB28_I:%.*]] = sub i32 [[TMP61]], [[TMP62]] // CHECK2-NEXT: [[SUB29_I:%.*]] = sub i32 [[SUB28_I]], 1 // CHECK2-NEXT: [[CONV33_I:%.*]] = zext i32 [[SUB28_I]] to i64 // CHECK2-NEXT: [[DIV34_I:%.*]] = sdiv i64 [[TMP60]], [[CONV33_I]] -// CHECK2-NEXT: [[TMP63:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK2-NEXT: [[TMP64:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK2-NEXT: [[TMP63:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP64:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[SUB35_I:%.*]] = sub i32 [[TMP63]], [[TMP64]] // CHECK2-NEXT: [[SUB36_I:%.*]] = sub i32 [[SUB35_I]], 1 // CHECK2-NEXT: [[CONV40_I:%.*]] = zext i32 [[SUB35_I]] to i64 @@ -1422,15 +1422,15 @@ // CHECK2-NEXT: [[SUB42_I:%.*]] = sub nsw i64 [[TMP59]], [[MUL41_I]] // CHECK2-NEXT: [[ADD44_I:%.*]] = add nsw i64 [[CONV27_I]], [[SUB42_I]] // CHECK2-NEXT: [[CONV45_I:%.*]] = trunc i64 [[ADD44_I]] to i32 -// CHECK2-NEXT: store i32 [[CONV45_I]], ptr [[J15_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK2-NEXT: [[TMP65:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK2-NEXT: store i32 [[CONV45_I]], ptr [[J15_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]] +// CHECK2-NEXT: [[TMP65:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef 
[[NOUNDEF3]] // CHECK2-NEXT: [[ADD46_I:%.*]] = add nsw i64 [[TMP65]], 1 -// CHECK2-NEXT: store i64 [[ADD46_I]], ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP49:![0-9]+]] +// CHECK2-NEXT: store i64 [[ADD46_I]], ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]] +// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP50:![0-9]+]] // CHECK2: omp.inner.for.end.i: // CHECK2-NEXT: br label [[TASKLOOP_IF_END_I]] // CHECK2: taskloop.if.end.i: -// CHECK2-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !47 +// CHECK2-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP67:%.*]] = icmp ne i32 [[TMP66]], 0 // CHECK2-NEXT: br i1 [[TMP67]], label [[DOTOMP_LASTPRIVATE_THEN_I:%.*]], label [[DOTOMP_OUTLINED__6_EXIT:%.*]] // CHECK2: .omp.lastprivate.then.i: @@ -1451,7 +1451,7 @@ // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4 +// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 8 // CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3]], ptr [[TMP3]], i32 0, i32 1 // CHECK2-NEXT: ret void @@ -1467,15 +1467,15 @@ // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK2-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 // CHECK2-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK2-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK2-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK2-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP1]] to i1 // CHECK2-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[TOBOOL2]] to i8 // CHECK2-NEXT: store i8 [[FROMBOOL3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 3, ptr @.omp_outlined..8, ptr [[THIS1]], ptr [[C_ADDR]], i64 [[TMP2]]) // CHECK2-NEXT: ret void // @@ -1498,10 +1498,10 @@ // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK2-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP3]]) // CHECK2-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0 // CHECK2-NEXT: br i1 [[TMP5]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -1511,12 +1511,12 @@ // CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], ptr [[AGG_CAPTURED]], i32 0, i32 1 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8 // CHECK2-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK2-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK2-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1 // CHECK2-NEXT: store ptr [[TMP]], ptr [[_TMP1]], align 8 -// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store i32 [[TMP9]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP10]], 0 // CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK2-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1 @@ -1526,19 +1526,19 @@ // CHECK2-NEXT: [[TMP13:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP3]], i32 [[TMP12]], i64 80, i64 16, ptr @.omp_task_entry..10) // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP13]], i32 0, i32 0 // CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP14]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8 +// CHECK2-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 5 // CHECK2-NEXT: store i64 0, ptr [[TMP17]], align 8 // CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CONV:%.*]] = sext i32 [[TMP19]] to i64 // CHECK2-NEXT: store i64 [[CONV]], ptr [[TMP18]], align 8 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 7 // CHECK2-NEXT: store i64 1, ptr 
[[TMP20]], align 8 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 9 // CHECK2-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP21]], i8 0, i64 8, i1 false) -// CHECK2-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP20]], align 8 +// CHECK2-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP20]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP3]], ptr [[TMP13]], i32 1, ptr [[TMP17]], ptr [[TMP18]], i64 [[TMP22]], i32 1, i32 2, i64 4, ptr null) // CHECK2-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP3]]) // CHECK2-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP3]]) @@ -1574,76 +1574,76 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK2-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK2-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK2-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK2-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK2-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META51:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META54:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META56:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META58:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META60:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !62 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !62 -// 
CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !62 -// CHECK2-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8 -// CHECK2-NEXT: store ptr [[TMP_I]], ptr [[TMP1_I]], align 8, !noalias !62 +// CHECK2-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META52:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META55:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META57:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META59:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META61:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !63 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !63 +// CHECK2-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: store ptr [[TMP_I]], ptr [[TMP1_I]], align 8, !noalias !63 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_ANON_4:%.*]], ptr [[TMP18]], i32 0, i32 1 // CHECK2-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP20]], align 8 -// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 -// CHECK2-NEXT: store i32 [[TMP22]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !62 -// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !62 +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: store i32 [[TMP22]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !63 +// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !63, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[SUB3_I:%.*]] = sub nsw i32 [[TMP23]], 1 -// CHECK2-NEXT: store i32 [[SUB3_I]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !62 -// CHECK2-NEXT: store ptr 
[[A_I]], ptr [[TMP4_I]], align 8, !noalias !62 -// CHECK2-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP4_I]], align 8, !noalias !62 +// CHECK2-NEXT: store i32 [[SUB3_I]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !63 +// CHECK2-NEXT: store ptr [[A_I]], ptr [[TMP4_I]], align 8, !noalias !63 +// CHECK2-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP4_I]], align 8, !noalias !63 // CHECK2-NEXT: store i32 0, ptr [[TMP24]], align 4 -// CHECK2-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !62 +// CHECK2-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !63, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CMP_I:%.*]] = icmp slt i32 0, [[TMP25]] // CHECK2-NEXT: br i1 [[CMP_I]], label [[TASKLOOP_IF_THEN_I:%.*]], label [[DOTOMP_OUTLINED__9_EXIT:%.*]] // CHECK2: taskloop.if.then.i: -// CHECK2-NEXT: store ptr [[A5_I]], ptr [[TMP6_I]], align 8, !noalias !62 -// CHECK2-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !62 +// CHECK2-NEXT: store ptr [[A5_I]], ptr [[TMP6_I]], align 8, !noalias !63 +// CHECK2-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !63, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP26]] to i32 -// CHECK2-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !62 +// CHECK2-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !63 // CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], ptr [[TMP18]], i32 0, i32 1 // CHECK2-NEXT: [[TMP28:%.*]] = load ptr, ptr [[TMP27]], align 8 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK2: omp.inner.for.cond.i: -// CHECK2-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !62, !llvm.access.group [[ACC_GRP63:![0-9]+]] +// CHECK2-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !63, !llvm.access.group [[ACC_GRP64:![0-9]+]], !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CONV7_I:%.*]] = sext i32 [[TMP29]] to i64 -// CHECK2-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !62, !llvm.access.group [[ACC_GRP63]] +// CHECK2-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !63, !llvm.access.group [[ACC_GRP64]], !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CMP8_I:%.*]] = icmp ule i64 [[CONV7_I]], [[TMP30]] // CHECK2-NEXT: br i1 [[CMP8_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK2: omp.inner.for.body.i: -// CHECK2-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !62, !llvm.access.group [[ACC_GRP63]] -// CHECK2-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP6_I]], align 8, !noalias !62, !llvm.access.group [[ACC_GRP63]] -// CHECK2-NEXT: store i32 [[TMP31]], ptr [[TMP32]], align 4, !llvm.access.group [[ACC_GRP63]] -// CHECK2-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !62, !llvm.access.group [[ACC_GRP63]] +// CHECK2-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !63, !llvm.access.group [[ACC_GRP64]], !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP6_I]], align 8, !noalias !63, !llvm.access.group [[ACC_GRP64]] +// CHECK2-NEXT: store i32 [[TMP31]], ptr [[TMP32]], align 4, !llvm.access.group [[ACC_GRP64]] +// CHECK2-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !63, !llvm.access.group [[ACC_GRP64]], !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[ADD9_I:%.*]] = add nsw i32 [[TMP33]], 1 -// CHECK2-NEXT: store i32 [[ADD9_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !62, 
!llvm.access.group [[ACC_GRP63]] -// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP64:![0-9]+]] +// CHECK2-NEXT: store i32 [[ADD9_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !63, !llvm.access.group [[ACC_GRP64]] +// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP65:![0-9]+]] // CHECK2: omp.inner.for.end.i: // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__9_EXIT]] // CHECK2: .omp_outlined..9.exit: @@ -1658,7 +1658,7 @@ // CHECK2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK2-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void @_ZN1SC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK2-NEXT: ret void // @@ -1698,33 +1698,33 @@ // CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK3-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK3-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined., i64 [[TMP3]]) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR__CASTED2]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED2]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED2]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..2, i64 [[TMP6]]) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_3]], align 1 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1 +// CHECK3-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK3-NEXT: [[FROMBOOL7:%.*]] = zext i1 [[TOBOOL5]] to i8 // CHECK3-NEXT: store i8 [[FROMBOOL7]], ptr [[DOTCAPTURE_EXPR__CASTED6]], align 1 -// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED6]], align 8 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR__CASTED8]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED8]], align 8 -// CHECK3-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1 +// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED8]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP13:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_3]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TOBOOL9:%.*]] = trunc i8 [[TMP13]] to i1 // CHECK3-NEXT: br i1 [[TOBOOL9]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -1754,13 +1754,13 @@ // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK3-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK3-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK3-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] // CHECK3: omp_if.then: // CHECK3-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP5:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 33, i64 80, i64 1, ptr @.omp_task_entry.) 
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP5]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP6]], i32 0, i32 4 @@ -1773,7 +1773,7 @@ // CHECK3-NEXT: store i64 1, ptr [[TMP10]], align 8 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP6]], i32 0, i32 9 // CHECK3-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP11]], i8 0, i64 8, i1 false) -// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP5]], i32 1, ptr [[TMP8]], ptr [[TMP9]], i64 [[TMP12]], i32 1, i32 0, i64 0, ptr null) // CHECK3-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK3-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP1]]) @@ -1802,56 +1802,56 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata 
[[META12:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP19]] to i32 -// CHECK3-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK3-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK3: omp.inner.for.cond.i: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV1_I:%.*]] = sext i32 [[TMP20]] to i64 -// CHECK3-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 +// CHECK3-NEXT: [[TMP21:%.*]] = load i64, ptr 
[[DOTUB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV1_I]], [[TMP21]] // CHECK3-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK3: omp.inner.for.body.i: -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 -// CHECK3-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !14 -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !15 +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD2_I:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK3-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK3: .omp_outlined..1.exit: // CHECK3-NEXT: ret i32 0 // @@ -1868,12 +1868,12 @@ // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK3-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK3-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK3-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] // CHECK3: omp_if.then: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP5:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i64 80, i64 1, ptr @.omp_task_entry..4) // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP5]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP6]], i32 0, i32 5 @@ -1884,7 +1884,7 @@ // CHECK3-NEXT: store i64 1, ptr [[TMP9]], align 8 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP6]], i32 0, i32 9 // CHECK3-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP10]], i8 0, i64 8, i1 false) -// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP12:%.*]] = zext i32 [[TMP4]] to i64 // CHECK3-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP5]], i32 1, ptr [[TMP7]], ptr [[TMP8]], i64 [[TMP11]], i32 1, i32 1, i64 [[TMP12]], ptr null) // CHECK3-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP1]]) @@ -1913,56 +1913,56 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, 
!noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META27:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META29:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !31 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !31 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !31 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !31 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !31 -// CHECK3-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !31 -// CHECK3-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !31 -// CHECK3-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !31 -// CHECK3-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !31 -// CHECK3-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !31 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !31 -// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !31 -// CHECK3-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !31 +// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) 
+// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META28:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !32 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !32 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 +// CHECK3-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !32 +// CHECK3-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !32 +// CHECK3-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !32 +// CHECK3-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !32 +// CHECK3-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !32 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 +// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 +// CHECK3-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !32, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP19]] to i32 -// CHECK3-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !31 +// CHECK3-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !32 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK3: omp.inner.for.cond.i: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !31, !llvm.access.group [[ACC_GRP32:![0-9]+]] +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33:![0-9]+]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV1_I:%.*]] = sext i32 [[TMP20]] to i64 -// CHECK3-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !31, !llvm.access.group [[ACC_GRP32]] +// CHECK3-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !32, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV1_I]], [[TMP21]] // CHECK3-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK3: omp.inner.for.body.i: -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !31, !llvm.access.group [[ACC_GRP32]] -// CHECK3-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !31, !llvm.access.group [[ACC_GRP32]] -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !31, !llvm.access.group [[ACC_GRP32]] +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF3]] +// CHECK3-NEXT: store i32 [[TMP22]], ptr [[I_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]] +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD2_I:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK3-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !31, !llvm.access.group 
[[ACC_GRP32]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP33:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD2_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP34:![0-9]+]] // CHECK3: .omp_outlined..3.exit: // CHECK3-NEXT: ret i32 0 // @@ -1995,7 +1995,7 @@ // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP4]]) // CHECK3-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0 // CHECK3-NEXT: br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -2007,33 +2007,33 @@ // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[AGG_CAPTURED]], i32 0, i32 2 // CHECK3-NEXT: store ptr [[TMP2]], ptr [[TMP9]], align 8 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[AGG_CAPTURED]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK3-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP11]] to i1 // CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[TMP10]], align 8 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP13]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP14]], ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP15]], i64 [[IDXPROM]] -// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP18]] to i64 // CHECK3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP17]], i64 [[IDXPROM7]] -// CHECK3-NEXT: [[TMP19:%.*]] = load i8, ptr [[ARRAYIDX8]], align 1 +// CHECK3-NEXT: [[TMP19:%.*]] = load i8, ptr [[ARRAYIDX8]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP19]] to i32 // CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTCAPTURE_EXPR_6]], 
align 4 -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP20]], 0 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK3-NEXT: [[CONV10:%.*]] = sext i32 [[DIV]] to i64 -// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4 -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_6]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB11:%.*]] = sub i32 [[TMP21]], [[TMP22]] // CHECK3-NEXT: [[SUB12:%.*]] = sub i32 [[SUB11]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB12]], 1 @@ -2045,22 +2045,22 @@ // CHECK3-NEXT: [[TMP23:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i64 88, i64 32, ptr @.omp_task_entry..7) // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP23]], i32 0, i32 0 // CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP24]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8 +// CHECK3-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP26]], ptr align 8 [[AGG_CAPTURED]], i64 32, i1 false) // CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3]], ptr [[TMP23]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP28:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK3-NEXT: [[TMP28:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TOBOOL16:%.*]] = trunc i8 [[TMP28]] to i1 // CHECK3-NEXT: [[TMP29:%.*]] = sext i1 [[TOBOOL16]] to i32 // CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP24]], i32 0, i32 5 // CHECK3-NEXT: store i64 0, ptr [[TMP30]], align 8 // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP24]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_9]], align 8 +// CHECK3-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_9]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i64 [[TMP32]], ptr [[TMP31]], align 8 // CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP24]], i32 0, i32 7 // CHECK3-NEXT: store i64 1, ptr [[TMP33]], align 8 // CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP24]], i32 0, i32 9 // CHECK3-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP34]], i8 0, i64 8, i1 false) -// CHECK3-NEXT: [[TMP35:%.*]] = load i64, ptr [[TMP33]], align 8 +// CHECK3-NEXT: [[TMP35:%.*]] = load i64, ptr [[TMP33]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP36:%.*]] = zext i32 [[TMP12]] to i64 // CHECK3-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP4]], ptr [[TMP23]], i32 [[TMP29]], ptr [[TMP30]], ptr [[TMP31]], i64 [[TMP35]], i32 1, i32 2, i64 [[TMP36]], ptr @.omp_task_dup.) 
// CHECK3-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP4]]) @@ -2112,131 +2112,131 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3]], ptr [[TMP3]], i32 0, i32 1 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META36:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META39:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META41:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META43:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META45:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !47 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !47 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !47 -// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !47 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !47 -// CHECK3-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !47 -// CHECK3-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !47 -// CHECK3-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !47 -// CHECK3-NEXT: store i32 [[TMP16]], ptr 
[[DOTLITER__ADDR_I]], align 4, !noalias !47 -// CHECK3-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !47 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !47 -// CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !47 -// CHECK3-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !47 -// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !47 +// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META37:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META40:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META42:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META44:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META46:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !48 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !48 +// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !48 +// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !48 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !48 +// CHECK3-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !48 +// CHECK3-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !48 +// CHECK3-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !48 +// CHECK3-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !48 +// CHECK3-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !48 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !48 +// CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !48 +// CHECK3-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !48 +// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !48 // CHECK3-NEXT: call void [[TMP20]](ptr [[TMP21]], ptr [[DOTLASTPRIV_PTR_ADDR_I]]) #[[ATTR2]] // CHECK3-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP19]], align 8 -// CHECK3-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !47 +// CHECK3-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !48 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON_2:%.*]], ptr [[TMP19]], i32 0, i32 1 // CHECK3-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 -// CHECK3-NEXT: store i32 [[TMP26]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !47 +// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: store i32 [[TMP26]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !48 // CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 1 // CHECK3-NEXT: [[TMP28:%.*]] = load ptr, ptr [[TMP27]], align 8 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4 -// CHECK3-NEXT: store i32 [[TMP29]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4, !noundef 
[[NOUNDEF3]] +// CHECK3-NEXT: store i32 [[TMP29]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48 // CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 2 // CHECK3-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 -// CHECK3-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP31]], align 8 +// CHECK3-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP31]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 1 // CHECK3-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8 -// CHECK3-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4 +// CHECK3-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM_I:%.*]] = sext i32 [[TMP35]] to i64 // CHECK3-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP32]], i64 [[IDXPROM_I]] -// CHECK3-NEXT: [[TMP36:%.*]] = load ptr, ptr [[ARRAYIDX_I]], align 8 +// CHECK3-NEXT: [[TMP36:%.*]] = load ptr, ptr [[ARRAYIDX_I]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 1 // CHECK3-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP37]], align 8 -// CHECK3-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP38]], align 4 +// CHECK3-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP38]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[IDXPROM4_I:%.*]] = sext i32 [[TMP39]] to i64 // CHECK3-NEXT: [[ARRAYIDX5_I:%.*]] = getelementptr inbounds i8, ptr [[TMP36]], i64 [[IDXPROM4_I]] -// CHECK3-NEXT: [[TMP40:%.*]] = load i8, ptr [[ARRAYIDX5_I]], align 1 +// CHECK3-NEXT: [[TMP40:%.*]] = load i8, ptr [[ARRAYIDX5_I]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV_I:%.*]] = sext i8 [[TMP40]] to i32 -// CHECK3-NEXT: store i32 [[CONV_I]], ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47 -// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !47 +// CHECK3-NEXT: store i32 [[CONV_I]], ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48 +// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV7_I:%.*]] = sext i32 [[TMP41]] to i64 -// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47 -// CHECK3-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 +// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB8_I:%.*]] = sub i32 [[TMP42]], [[TMP43]] // CHECK3-NEXT: [[SUB9_I:%.*]] = sub i32 [[SUB8_I]], 1 // CHECK3-NEXT: [[CONV11_I:%.*]] = zext i32 [[SUB8_I]] to i64 // CHECK3-NEXT: [[MUL_I:%.*]] = mul nsw i64 [[CONV7_I]], [[CONV11_I]] // CHECK3-NEXT: [[SUB12_I:%.*]] = sub nsw i64 [[MUL_I]], 1 -// CHECK3-NEXT: store i64 [[SUB12_I]], ptr [[DOTCAPTURE_EXPR_6_I]], align 8, !noalias !47 -// CHECK3-NEXT: store i32 0, ptr [[I_I]], align 4, !noalias !47 -// CHECK3-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 -// CHECK3-NEXT: store i32 [[TMP44]], ptr [[J_I]], align 4, !noalias !47 -// CHECK3-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !47 +// CHECK3-NEXT: store i64 [[SUB12_I]], ptr [[DOTCAPTURE_EXPR_6_I]], align 8, !noalias !48 +// CHECK3-NEXT: store i32 0, ptr [[I_I]], align 4, !noalias !48 +// CHECK3-NEXT: 
[[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: store i32 [[TMP44]], ptr [[J_I]], align 4, !noalias !48 +// CHECK3-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP_I:%.*]] = icmp slt i32 0, [[TMP45]] // CHECK3-NEXT: br i1 [[CMP_I]], label [[LAND_LHS_TRUE_I:%.*]], label [[TASKLOOP_IF_END_I:%.*]] // CHECK3: land.lhs.true.i: -// CHECK3-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 -// CHECK3-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47 +// CHECK3-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP13_I:%.*]] = icmp slt i32 [[TMP46]], [[TMP47]] // CHECK3-NEXT: br i1 [[CMP13_I]], label [[TASKLOOP_IF_THEN_I:%.*]], label [[TASKLOOP_IF_END_I]] // CHECK3: taskloop.if.then.i: -// CHECK3-NEXT: [[TMP48:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !47 -// CHECK3-NEXT: store i64 [[TMP48]], ptr [[DOTOMP_IV_I]], align 8, !noalias !47 +// CHECK3-NEXT: [[TMP48:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: store i64 [[TMP48]], ptr [[DOTOMP_IV_I]], align 8, !noalias !48 // CHECK3-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 1 // CHECK3-NEXT: [[TMP50:%.*]] = load ptr, ptr [[TMP49]], align 8 // CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 2 // CHECK3-NEXT: [[TMP52:%.*]] = load ptr, ptr [[TMP51]], align 8 // CHECK3-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT_ANON_2]], ptr [[TMP19]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP54:%.*]] = load i8, ptr [[TMP53]], align 1 +// CHECK3-NEXT: [[TMP54:%.*]] = load i8, ptr [[TMP53]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TOBOOL_I:%.*]] = trunc i8 [[TMP54]] to i1 // CHECK3-NEXT: br i1 [[TOBOOL_I]], label [[OMP_IF_THEN_I:%.*]], label [[OMP_IF_ELSE_I:%.*]] // CHECK3: omp_if.then.i: // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK3: omp.inner.for.cond.i: -// CHECK3-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48:![0-9]+]] -// CHECK3-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK3-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP16_I:%.*]] = icmp ule i64 [[TMP55]], [[TMP56]] // CHECK3-NEXT: br i1 [[CMP16_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK3: omp.inner.for.body.i: -// CHECK3-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK3-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK3-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK3-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], 
!noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB17_I:%.*]] = sub i32 [[TMP58]], [[TMP59]] // CHECK3-NEXT: [[SUB18_I:%.*]] = sub i32 [[SUB17_I]], 1 // CHECK3-NEXT: [[CONV22_I:%.*]] = zext i32 [[SUB17_I]] to i64 // CHECK3-NEXT: [[DIV23_I:%.*]] = sdiv i64 [[TMP57]], [[CONV22_I]] // CHECK3-NEXT: [[CONV26_I:%.*]] = trunc i64 [[DIV23_I]] to i32 -// CHECK3-NEXT: store i32 [[CONV26_I]], ptr [[I14_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK3-NEXT: [[TMP60:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK3-NEXT: store i32 [[CONV26_I]], ptr [[I14_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]] +// CHECK3-NEXT: [[TMP60:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV27_I:%.*]] = sext i32 [[TMP60]] to i64 -// CHECK3-NEXT: [[TMP61:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK3-NEXT: [[TMP62:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK3-NEXT: [[TMP63:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK3-NEXT: [[TMP64:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK3-NEXT: [[TMP61:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP62:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP63:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP64:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB28_I:%.*]] = sub i32 [[TMP63]], [[TMP64]] // CHECK3-NEXT: [[SUB29_I:%.*]] = sub i32 [[SUB28_I]], 1 // CHECK3-NEXT: [[CONV33_I:%.*]] = zext i32 [[SUB28_I]] to i64 // CHECK3-NEXT: [[DIV34_I:%.*]] = sdiv i64 [[TMP62]], [[CONV33_I]] -// CHECK3-NEXT: [[TMP65:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK3-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK3-NEXT: [[TMP65:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB35_I:%.*]] = sub i32 [[TMP65]], [[TMP66]] // CHECK3-NEXT: [[SUB36_I:%.*]] = sub i32 [[SUB35_I]], 1 // CHECK3-NEXT: [[CONV40_I:%.*]] = zext i32 [[SUB35_I]] to i64 @@ -2244,42 +2244,42 @@ // CHECK3-NEXT: [[SUB42_I:%.*]] = sub nsw i64 [[TMP61]], [[MUL41_I]] // CHECK3-NEXT: [[ADD44_I:%.*]] = add nsw i64 [[CONV27_I]], [[SUB42_I]] // CHECK3-NEXT: [[CONV45_I:%.*]] = trunc i64 [[ADD44_I]] to i32 -// CHECK3-NEXT: store i32 [[CONV45_I]], ptr 
[[J15_I]], align 4, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK3-NEXT: [[TMP67:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] +// CHECK3-NEXT: store i32 [[CONV45_I]], ptr [[J15_I]], align 4, !noalias !48, !llvm.access.group [[ACC_GRP49]] +// CHECK3-NEXT: [[TMP67:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD46_I:%.*]] = add nsw i64 [[TMP67]], 1 -// CHECK3-NEXT: store i64 [[ADD46_I]], ptr [[DOTOMP_IV_I]], align 8, !noalias !47, !llvm.access.group [[ACC_GRP48]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP49:![0-9]+]] +// CHECK3-NEXT: store i64 [[ADD46_I]], ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !llvm.access.group [[ACC_GRP49]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP50:![0-9]+]] // CHECK3: omp.inner.for.end.i: // CHECK3-NEXT: br label [[OMP_IF_END_I:%.*]] // CHECK3: omp_if.else.i: // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND47_I:%.*]] // CHECK3: omp.inner.for.cond47.i: -// CHECK3-NEXT: [[TMP68:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47 -// CHECK3-NEXT: [[TMP69:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !47 +// CHECK3-NEXT: [[TMP68:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP69:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !48, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP48_I:%.*]] = icmp ule i64 [[TMP68]], [[TMP69]] // CHECK3-NEXT: br i1 [[CMP48_I]], label [[OMP_INNER_FOR_BODY49_I:%.*]], label [[OMP_INNER_FOR_END82_I:%.*]] // CHECK3: omp.inner.for.body49.i: -// CHECK3-NEXT: [[TMP70:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47 -// CHECK3-NEXT: [[TMP71:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47 -// CHECK3-NEXT: [[TMP72:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 +// CHECK3-NEXT: [[TMP70:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP71:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP72:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB50_I:%.*]] = sub i32 [[TMP71]], [[TMP72]] // CHECK3-NEXT: [[SUB51_I:%.*]] = sub i32 [[SUB50_I]], 1 // CHECK3-NEXT: [[CONV55_I:%.*]] = zext i32 [[SUB50_I]] to i64 // CHECK3-NEXT: [[DIV56_I:%.*]] = sdiv i64 [[TMP70]], [[CONV55_I]] // CHECK3-NEXT: [[CONV59_I:%.*]] = trunc i64 [[DIV56_I]] to i32 -// CHECK3-NEXT: store i32 [[CONV59_I]], ptr [[I14_I]], align 4, !noalias !47 -// CHECK3-NEXT: [[TMP73:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 +// CHECK3-NEXT: store i32 [[CONV59_I]], ptr [[I14_I]], align 4, !noalias !48 +// CHECK3-NEXT: [[TMP73:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV60_I:%.*]] = sext i32 [[TMP73]] to i64 -// CHECK3-NEXT: [[TMP74:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47 -// CHECK3-NEXT: [[TMP75:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47 -// CHECK3-NEXT: [[TMP76:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47 -// CHECK3-NEXT: [[TMP77:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 +// CHECK3-NEXT: [[TMP74:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: 
[[TMP75:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP76:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP77:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB61_I:%.*]] = sub i32 [[TMP76]], [[TMP77]] // CHECK3-NEXT: [[SUB62_I:%.*]] = sub i32 [[SUB61_I]], 1 // CHECK3-NEXT: [[CONV66_I:%.*]] = zext i32 [[SUB61_I]] to i64 // CHECK3-NEXT: [[DIV67_I:%.*]] = sdiv i64 [[TMP75]], [[CONV66_I]] -// CHECK3-NEXT: [[TMP78:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !47 -// CHECK3-NEXT: [[TMP79:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !47 +// CHECK3-NEXT: [[TMP78:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP79:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB68_I:%.*]] = sub i32 [[TMP78]], [[TMP79]] // CHECK3-NEXT: [[SUB69_I:%.*]] = sub i32 [[SUB68_I]], 1 // CHECK3-NEXT: [[CONV73_I:%.*]] = zext i32 [[SUB68_I]] to i64 @@ -2287,17 +2287,17 @@ // CHECK3-NEXT: [[SUB75_I:%.*]] = sub nsw i64 [[TMP74]], [[MUL74_I]] // CHECK3-NEXT: [[ADD77_I:%.*]] = add nsw i64 [[CONV60_I]], [[SUB75_I]] // CHECK3-NEXT: [[CONV78_I:%.*]] = trunc i64 [[ADD77_I]] to i32 -// CHECK3-NEXT: store i32 [[CONV78_I]], ptr [[J15_I]], align 4, !noalias !47 -// CHECK3-NEXT: [[TMP80:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !47 +// CHECK3-NEXT: store i32 [[CONV78_I]], ptr [[J15_I]], align 4, !noalias !48 +// CHECK3-NEXT: [[TMP80:%.*]] = load i64, ptr [[DOTOMP_IV_I]], align 8, !noalias !48, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD81_I:%.*]] = add nsw i64 [[TMP80]], 1 -// CHECK3-NEXT: store i64 [[ADD81_I]], ptr [[DOTOMP_IV_I]], align 8, !noalias !47 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND47_I]], !llvm.loop [[LOOP51:![0-9]+]] +// CHECK3-NEXT: store i64 [[ADD81_I]], ptr [[DOTOMP_IV_I]], align 8, !noalias !48 +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND47_I]], !llvm.loop [[LOOP52:![0-9]+]] // CHECK3: omp.inner.for.end82.i: // CHECK3-NEXT: br label [[OMP_IF_END_I]] // CHECK3: omp_if.end.i: // CHECK3-NEXT: br label [[TASKLOOP_IF_END_I]] // CHECK3: taskloop.if.end.i: -// CHECK3-NEXT: [[TMP81:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !47 +// CHECK3-NEXT: [[TMP81:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !48, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP82:%.*]] = icmp ne i32 [[TMP81]], 0 // CHECK3-NEXT: br i1 [[TMP82]], label [[DOTOMP_LASTPRIVATE_THEN_I:%.*]], label [[DOTOMP_OUTLINED__6_EXIT:%.*]] // CHECK3: .omp.lastprivate.then.i: @@ -2318,7 +2318,7 @@ // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 8 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3]], ptr [[TMP3]], i32 0, i32 1 // CHECK3-NEXT: ret void @@ -2339,7 +2339,7 @@ // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK3-NEXT: store i32 [[C]], ptr 
[[C_ADDR]], align 4 // CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @_ZN1SC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK3-NEXT: ret void // @@ -2354,15 +2354,15 @@ // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK3-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 // CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK3-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP1:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP1]] to i1 // CHECK3-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[TOBOOL2]] to i8 // CHECK3-NEXT: store i8 [[FROMBOOL3]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK3-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK3-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 3, ptr @.omp_outlined..8, ptr [[THIS1]], ptr [[C_ADDR]], i64 [[TMP2]]) // CHECK3-NEXT: ret void // @@ -2385,10 +2385,10 @@ // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK3-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP3]]) // CHECK3-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0 // CHECK3-NEXT: br i1 [[TMP5]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -2398,12 +2398,12 @@ // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], ptr [[AGG_CAPTURED]], i32 0, i32 1 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[TMP7]], align 8 // CHECK3-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK3-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK3-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1 // CHECK3-NEXT: store ptr [[TMP]], ptr [[_TMP1]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP9]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef 
[[NOUNDEF3]] // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP10]], 0 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK3-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1 @@ -2413,19 +2413,19 @@ // CHECK3-NEXT: [[TMP13:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP3]], i32 [[TMP12]], i64 80, i64 16, ptr @.omp_task_entry..10) // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP13]], i32 0, i32 0 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP14]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8 +// CHECK3-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 5 // CHECK3-NEXT: store i64 0, ptr [[TMP17]], align 8 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV:%.*]] = sext i32 [[TMP19]] to i64 // CHECK3-NEXT: store i64 [[CONV]], ptr [[TMP18]], align 8 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 7 // CHECK3-NEXT: store i64 1, ptr [[TMP20]], align 8 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 9 // CHECK3-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP21]], i8 0, i64 8, i1 false) -// CHECK3-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP20]], align 8 +// CHECK3-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP20]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP3]], ptr [[TMP13]], i32 1, ptr [[TMP17]], ptr [[TMP18]], i64 [[TMP22]], i32 1, i32 2, i64 4, ptr null) // CHECK3-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP3]]) // CHECK3-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP3]]) @@ -2461,76 +2461,76 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK3-NEXT: 
[[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META53:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META56:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META58:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META60:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META62:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !64 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !64 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !64 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !64 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !64 -// CHECK3-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !64 -// CHECK3-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !64 -// CHECK3-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !64 -// CHECK3-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !64 -// CHECK3-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !64 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !64 -// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !64 -// CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8 -// CHECK3-NEXT: store ptr [[TMP_I]], ptr [[TMP1_I]], align 8, !noalias !64 +// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META54:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META57:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META59:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META61:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META63:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !65 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !65 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !65 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !65 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !65 +// CHECK3-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !65 +// 
CHECK3-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !65 +// CHECK3-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !65 +// CHECK3-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !65 +// CHECK3-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !65 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !65 +// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !65 +// CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: store ptr [[TMP_I]], ptr [[TMP1_I]], align 8, !noalias !65 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_ANON_4:%.*]], ptr [[TMP18]], i32 0, i32 1 // CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP20]], align 8 -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 -// CHECK3-NEXT: store i32 [[TMP22]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !64 -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !64 +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: store i32 [[TMP22]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !65 +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !65, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[SUB3_I:%.*]] = sub nsw i32 [[TMP23]], 1 -// CHECK3-NEXT: store i32 [[SUB3_I]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !64 -// CHECK3-NEXT: store ptr [[A_I]], ptr [[TMP4_I]], align 8, !noalias !64 -// CHECK3-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP4_I]], align 8, !noalias !64 +// CHECK3-NEXT: store i32 [[SUB3_I]], ptr [[DOTCAPTURE_EXPR_2_I]], align 4, !noalias !65 +// CHECK3-NEXT: store ptr [[A_I]], ptr [[TMP4_I]], align 8, !noalias !65 +// CHECK3-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP4_I]], align 8, !noalias !65 // CHECK3-NEXT: store i32 0, ptr [[TMP24]], align 4 -// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !64 +// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !65, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP_I:%.*]] = icmp slt i32 0, [[TMP25]] // CHECK3-NEXT: br i1 [[CMP_I]], label [[TASKLOOP_IF_THEN_I:%.*]], label [[DOTOMP_OUTLINED__9_EXIT:%.*]] // CHECK3: taskloop.if.then.i: -// CHECK3-NEXT: store ptr [[A5_I]], ptr [[TMP6_I]], align 8, !noalias !64 -// CHECK3-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !64 +// CHECK3-NEXT: store ptr [[A5_I]], ptr [[TMP6_I]], align 8, !noalias !65 +// CHECK3-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !65, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP26]] to i32 -// CHECK3-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !64 +// CHECK3-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !65 // CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_ANON_4]], ptr [[TMP18]], i32 0, i32 1 // CHECK3-NEXT: [[TMP28:%.*]] = load ptr, ptr [[TMP27]], align 8 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK3: omp.inner.for.cond.i: -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !64, !llvm.access.group [[ACC_GRP65:![0-9]+]] +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !65, !llvm.access.group [[ACC_GRP66:![0-9]+]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV7_I:%.*]] = sext i32 [[TMP29]] 
to i64 -// CHECK3-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !64, !llvm.access.group [[ACC_GRP65]] +// CHECK3-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !65, !llvm.access.group [[ACC_GRP66]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP8_I:%.*]] = icmp ule i64 [[CONV7_I]], [[TMP30]] // CHECK3-NEXT: br i1 [[CMP8_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK3: omp.inner.for.body.i: -// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !64, !llvm.access.group [[ACC_GRP65]] -// CHECK3-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP6_I]], align 8, !noalias !64, !llvm.access.group [[ACC_GRP65]] -// CHECK3-NEXT: store i32 [[TMP31]], ptr [[TMP32]], align 4, !llvm.access.group [[ACC_GRP65]] -// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !64, !llvm.access.group [[ACC_GRP65]] +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !65, !llvm.access.group [[ACC_GRP66]], !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP6_I]], align 8, !noalias !65, !llvm.access.group [[ACC_GRP66]] +// CHECK3-NEXT: store i32 [[TMP31]], ptr [[TMP32]], align 4, !llvm.access.group [[ACC_GRP66]] +// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !65, !llvm.access.group [[ACC_GRP66]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD9_I:%.*]] = add nsw i32 [[TMP33]], 1 -// CHECK3-NEXT: store i32 [[ADD9_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !64, !llvm.access.group [[ACC_GRP65]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP66:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD9_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !65, !llvm.access.group [[ACC_GRP66]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP67:![0-9]+]] // CHECK3: omp.inner.for.end.i: // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__9_EXIT]] // CHECK3: .omp_outlined..9.exit: @@ -2581,22 +2581,22 @@ // CHECK5-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK5-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK5-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 // CHECK5-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK5-NEXT: store i64 9, ptr [[DOTOMP_UB]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CONV1:%.*]] = sext i32 [[TMP2]] to i64 -// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV1]], [[TMP3]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 
4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -2604,68 +2604,68 @@ // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1 // CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 10, ptr [[I]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_3]], align 4 // CHECK5-NEXT: store i64 0, ptr [[DOTOMP_LB5]], align 8 // CHECK5-NEXT: store i64 9, ptr [[DOTOMP_UB6]], align 8 -// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_LB5]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_LB5]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CONV8:%.*]] = trunc i64 [[TMP7]] to i32 // CHECK5-NEXT: store i32 [[CONV8]], ptr [[DOTOMP_IV7]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] // CHECK5: omp.inner.for.cond10: -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CONV11:%.*]] = sext i32 [[TMP8]] to i64 -// CHECK5-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB6]], align 8, !llvm.access.group [[ACC_GRP5]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB6]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP12:%.*]] = icmp ule i64 [[CONV11]], [[TMP9]] // CHECK5-NEXT: br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY13:%.*]], label [[OMP_INNER_FOR_END19:%.*]] // CHECK5: omp.inner.for.body13: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL14:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK5-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]] -// CHECK5-NEXT: store i32 [[ADD15]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK5-NEXT: store i32 [[ADD15]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] // CHECK5: omp.body.continue16: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK5: omp.inner.for.inc17: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK5-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK5-NEXT: store i32 
[[ADD18]], ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK5: omp.inner.for.end19: // CHECK5-NEXT: store i32 10, ptr [[I9]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK5-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK5-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_21]], align 1 -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP13]], ptr [[DOTCAPTURE_EXPR_22]], align 4 -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP14]], ptr [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP15]], ptr [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK5-NEXT: [[TMP16:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK5-NEXT: [[TMP16:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP16]], i64 [[IDXPROM]] -// CHECK5-NEXT: [[TMP18:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 -// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK5-NEXT: [[TMP18:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM28:%.*]] = sext i32 [[TMP19]] to i64 // CHECK5-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds i8, ptr [[TMP18]], i64 [[IDXPROM28]] -// CHECK5-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX29]], align 1 +// CHECK5-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX29]], align 1, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CONV30:%.*]] = sext i8 [[TMP20]] to i32 // CHECK5-NEXT: store i32 [[CONV30]], ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP21]], 0 // CHECK5-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK5-NEXT: [[CONV32:%.*]] = sext i32 [[DIV]] to i64 -// CHECK5-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK5-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB33:%.*]] = sub i32 [[TMP22]], [[TMP23]] // CHECK5-NEXT: [[SUB34:%.*]] = sub i32 [[SUB33]], 1 // CHECK5-NEXT: [[ADD35:%.*]] = add i32 [[SUB34]], 1 @@ -2675,34 +2675,34 @@ // CHECK5-NEXT: [[SUB39:%.*]] = sub nsw i64 [[MUL38]], 1 // CHECK5-NEXT: store i64 [[SUB39]], ptr 
[[DOTCAPTURE_EXPR_31]], align 8 // CHECK5-NEXT: store i64 0, ptr [[DOTOMP_LB40]], align 8 -// CHECK5-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_31]], align 8 +// CHECK5-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_31]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i64 [[TMP24]], ptr [[DOTOMP_UB41]], align 8 // CHECK5-NEXT: store i32 0, ptr [[I42]], align 4 -// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP25]], ptr [[J]], align 4 -// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP43:%.*]] = icmp slt i32 0, [[TMP26]] // CHECK5-NEXT: br i1 [[CMP43]], label [[LAND_LHS_TRUE:%.*]], label [[SIMD_IF_END:%.*]] // CHECK5: land.lhs.true: -// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK5-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 +// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP44:%.*]] = icmp slt i32 [[TMP27]], [[TMP28]] // CHECK5-NEXT: br i1 [[CMP44]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END]] // CHECK5: simd.if.then: -// CHECK5-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_LB40]], align 8 +// CHECK5-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_LB40]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i64 [[TMP29]], ptr [[DOTOMP_IV45]], align 8 -// CHECK5-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK5-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP30]], i64 8) ] // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND48:%.*]] // CHECK5: omp.inner.for.cond48: -// CHECK5-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK5-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_UB41]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_UB41]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP49:%.*]] = icmp ule i64 [[TMP31]], [[TMP32]] // CHECK5-NEXT: br i1 [[CMP49]], label [[OMP_INNER_FOR_BODY50:%.*]], label [[OMP_INNER_FOR_END83:%.*]] // CHECK5: omp.inner.for.body50: -// CHECK5-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK5-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK5-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB51:%.*]] = sub i32 [[TMP34]], [[TMP35]] 
// CHECK5-NEXT: [[SUB52:%.*]] = sub i32 [[SUB51]], 1 // CHECK5-NEXT: [[ADD53:%.*]] = add i32 [[SUB52]], 1 @@ -2713,13 +2713,13 @@ // CHECK5-NEXT: [[MUL58:%.*]] = mul nsw i64 [[DIV57]], 1 // CHECK5-NEXT: [[ADD59:%.*]] = add nsw i64 0, [[MUL58]] // CHECK5-NEXT: [[CONV60:%.*]] = trunc i64 [[ADD59]] to i32 -// CHECK5-NEXT: store i32 [[CONV60]], ptr [[I46]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK5-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: store i32 [[CONV60]], ptr [[I46]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CONV61:%.*]] = sext i32 [[TMP36]] to i64 -// CHECK5-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK5-NEXT: [[TMP38:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK5-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK5-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP38:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB62:%.*]] = sub i32 [[TMP39]], [[TMP40]] // CHECK5-NEXT: [[SUB63:%.*]] = sub i32 [[SUB62]], 1 // CHECK5-NEXT: [[ADD64:%.*]] = add i32 [[SUB63]], 1 @@ -2727,8 +2727,8 @@ // CHECK5-NEXT: [[MUL66:%.*]] = mul i32 1, [[DIV65]] // CHECK5-NEXT: [[CONV67:%.*]] = zext i32 [[MUL66]] to i64 // CHECK5-NEXT: [[DIV68:%.*]] = sdiv i64 [[TMP38]], [[CONV67]] -// CHECK5-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK5-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB69:%.*]] = sub i32 [[TMP41]], [[TMP42]] // CHECK5-NEXT: [[SUB70:%.*]] = sub i32 [[SUB69]], 1 // CHECK5-NEXT: [[ADD71:%.*]] = add i32 [[SUB70]], 1 @@ -2740,25 +2740,25 @@ // CHECK5-NEXT: [[MUL77:%.*]] = mul nsw i64 [[SUB76]], 1 // CHECK5-NEXT: [[ADD78:%.*]] = add nsw i64 [[CONV61]], [[MUL77]] // CHECK5-NEXT: [[CONV79:%.*]] = trunc i64 [[ADD78]] to i32 -// CHECK5-NEXT: store i32 [[CONV79]], ptr [[J47]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: store i32 [[CONV79]], ptr [[J47]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE80:%.*]] // CHECK5: omp.body.continue80: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC81:%.*]] // CHECK5: omp.inner.for.inc81: -// CHECK5-NEXT: [[TMP43:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: [[TMP43:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group 
[[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD82:%.*]] = add nsw i64 [[TMP43]], 1 -// CHECK5-NEXT: store i64 [[ADD82]], ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND48]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK5-NEXT: store i64 [[ADD82]], ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND48]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK5: omp.inner.for.end83: -// CHECK5-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK5-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB84:%.*]] = sub nsw i32 [[TMP44]], 0 // CHECK5-NEXT: [[DIV85:%.*]] = sdiv i32 [[SUB84]], 1 // CHECK5-NEXT: [[MUL86:%.*]] = mul nsw i32 [[DIV85]], 1 // CHECK5-NEXT: [[ADD87:%.*]] = add nsw i32 0, [[MUL86]] // CHECK5-NEXT: store i32 [[ADD87]], ptr [[I20]], align 4 -// CHECK5-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK5-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK5-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK5-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB88:%.*]] = sub i32 [[TMP46]], [[TMP47]] // CHECK5-NEXT: [[SUB89:%.*]] = sub i32 [[SUB88]], 1 // CHECK5-NEXT: [[ADD90:%.*]] = add i32 [[SUB89]], 1 @@ -2787,7 +2787,7 @@ // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK5-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: call void @_ZN1SC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK5-NEXT: ret void // @@ -2813,58 +2813,58 @@ // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK5-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 // CHECK5-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK5-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK5-NEXT: store ptr [[TMP]], ptr [[_TMP2]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0 // CHECK5-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK5-NEXT: [[SUB5:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK5-NEXT: store i32 [[SUB5]], ptr [[DOTCAPTURE_EXPR_4]], align 4 // CHECK5-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 
4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CONV:%.*]] = sext i32 [[TMP3]] to i64 // CHECK5-NEXT: store i64 [[CONV]], ptr [[DOTOMP_UB]], align 8 // CHECK5-NEXT: store ptr [[A]], ptr [[_TMP6]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[_TMP6]], align 8 // CHECK5-NEXT: store i32 0, ptr [[TMP4]], align 4 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] // CHECK5-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK5: simd.if.then: -// CHECK5-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK5-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CONV7:%.*]] = trunc i64 [[TMP6]] to i32 // CHECK5-NEXT: store i32 [[CONV7]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: store ptr [[A8]], ptr [[_TMP9]], align 8 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CONV10:%.*]] = sext i32 [[TMP7]] to i64 -// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP12]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP11:%.*]] = icmp ule i64 [[CONV10]], [[TMP8]] // CHECK5-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP9]], align 8, !llvm.access.group [[ACC_GRP12]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK5-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP9]], align 8, !llvm.access.group [[ACC_GRP13]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK5-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 // 
CHECK5-NEXT: store ptr [[A13]], ptr [[_TMP14]], align 8 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP12]], 0 // CHECK5-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1 // CHECK5-NEXT: [[MUL17:%.*]] = mul nsw i32 [[DIV16]], 1 @@ -2920,22 +2920,22 @@ // CHECK6-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK6-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK6-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK6-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK6-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK6-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 // CHECK6-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK6-NEXT: store i64 9, ptr [[DOTOMP_UB]], align 8 -// CHECK6-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK6-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK6-NEXT: store i32 [[CONV]], ptr [[DOTOMP_IV]], align 4 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK6: omp.inner.for.cond: -// CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CONV1:%.*]] = sext i32 [[TMP2]] to i64 -// CHECK6-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK6-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV1]], [[TMP3]] // CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK6: omp.inner.for.body: -// CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK6-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK6-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -2943,68 +2943,68 @@ // CHECK6: omp.body.continue: // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK6: omp.inner.for.inc: -// CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1 // CHECK6-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4 -// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]] +// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] // CHECK6: omp.inner.for.end: // CHECK6-NEXT: store i32 10, ptr [[I]], align 4 -// CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK6-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_3]], align 4 // CHECK6-NEXT: store i64 0, ptr [[DOTOMP_LB5]], align 8 // CHECK6-NEXT: store i64 9, ptr [[DOTOMP_UB6]], align 8 -// CHECK6-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_LB5]], align 8 +// CHECK6-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_LB5]], align 8, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CONV8:%.*]] = trunc i64 [[TMP7]] to i32 // CHECK6-NEXT: store i32 [[CONV8]], ptr [[DOTOMP_IV7]], align 4 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] // 
CHECK6: omp.inner.for.cond10: -// CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]] +// CHECK6-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CONV11:%.*]] = sext i32 [[TMP8]] to i64 -// CHECK6-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB6]], align 8, !llvm.access.group [[ACC_GRP5]] +// CHECK6-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB6]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CMP12:%.*]] = icmp ule i64 [[CONV11]], [[TMP9]] // CHECK6-NEXT: br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY13:%.*]], label [[OMP_INNER_FOR_END19:%.*]] // CHECK6: omp.inner.for.body13: -// CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK6-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[MUL14:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK6-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]] -// CHECK6-NEXT: store i32 [[ADD15]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK6-NEXT: store i32 [[ADD15]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] // CHECK6: omp.body.continue16: // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK6: omp.inner.for.inc17: -// CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK6-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5]] -// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK6-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK6: omp.inner.for.end19: // CHECK6-NEXT: store i32 10, ptr [[I9]], align 4 -// CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK6-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK6-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_21]], align 1 -// CHECK6-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK6-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: store i32 [[TMP13]], ptr [[DOTCAPTURE_EXPR_22]], align 4 -// CHECK6-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK6-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: store i32 [[TMP14]], ptr [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK6-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK6-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: store i32 [[TMP15]], ptr [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK6-NEXT: [[TMP16:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK6-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK6-NEXT: [[TMP16:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK6-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARGC_ADDR]], 
align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP16]], i64 [[IDXPROM]] -// CHECK6-NEXT: [[TMP18:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 -// CHECK6-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK6-NEXT: [[TMP18:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !noundef [[NOUNDEF2]] +// CHECK6-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[IDXPROM28:%.*]] = sext i32 [[TMP19]] to i64 // CHECK6-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds i8, ptr [[TMP18]], i64 [[IDXPROM28]] -// CHECK6-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX29]], align 1 +// CHECK6-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX29]], align 1, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CONV30:%.*]] = sext i8 [[TMP20]] to i32 // CHECK6-NEXT: store i32 [[CONV30]], ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK6-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK6-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP21]], 0 // CHECK6-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK6-NEXT: [[CONV32:%.*]] = sext i32 [[DIV]] to i64 -// CHECK6-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK6-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK6-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] +// CHECK6-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[SUB33:%.*]] = sub i32 [[TMP22]], [[TMP23]] // CHECK6-NEXT: [[SUB34:%.*]] = sub i32 [[SUB33]], 1 // CHECK6-NEXT: [[ADD35:%.*]] = add i32 [[SUB34]], 1 @@ -3014,34 +3014,34 @@ // CHECK6-NEXT: [[SUB39:%.*]] = sub nsw i64 [[MUL38]], 1 // CHECK6-NEXT: store i64 [[SUB39]], ptr [[DOTCAPTURE_EXPR_31]], align 8 // CHECK6-NEXT: store i64 0, ptr [[DOTOMP_LB40]], align 8 -// CHECK6-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_31]], align 8 +// CHECK6-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_31]], align 8, !noundef [[NOUNDEF2]] // CHECK6-NEXT: store i64 [[TMP24]], ptr [[DOTOMP_UB41]], align 8 // CHECK6-NEXT: store i32 0, ptr [[I42]], align 4 -// CHECK6-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK6-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: store i32 [[TMP25]], ptr [[J]], align 4 -// CHECK6-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK6-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CMP43:%.*]] = icmp slt i32 0, [[TMP26]] // CHECK6-NEXT: br i1 [[CMP43]], label [[LAND_LHS_TRUE:%.*]], label [[SIMD_IF_END:%.*]] // CHECK6: land.lhs.true: -// CHECK6-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK6-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 +// CHECK6-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] +// CHECK6-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CMP44:%.*]] = icmp slt i32 [[TMP27]], [[TMP28]] // CHECK6-NEXT: br i1 [[CMP44]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END]] // CHECK6: simd.if.then: -// CHECK6-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_LB40]], align 8 +// 
CHECK6-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_LB40]], align 8, !noundef [[NOUNDEF2]] // CHECK6-NEXT: store i64 [[TMP29]], ptr [[DOTOMP_IV45]], align 8 -// CHECK6-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK6-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK6-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP30]], i64 8) ] // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND48:%.*]] // CHECK6: omp.inner.for.cond48: -// CHECK6-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK6-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_UB41]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK6-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK6-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_UB41]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CMP49:%.*]] = icmp ule i64 [[TMP31]], [[TMP32]] // CHECK6-NEXT: br i1 [[CMP49]], label [[OMP_INNER_FOR_BODY50:%.*]], label [[OMP_INNER_FOR_END83:%.*]] // CHECK6: omp.inner.for.body50: -// CHECK6-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK6-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK6-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK6-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK6-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK6-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[SUB51:%.*]] = sub i32 [[TMP34]], [[TMP35]] // CHECK6-NEXT: [[SUB52:%.*]] = sub i32 [[SUB51]], 1 // CHECK6-NEXT: [[ADD53:%.*]] = add i32 [[SUB52]], 1 @@ -3052,13 +3052,13 @@ // CHECK6-NEXT: [[MUL58:%.*]] = mul nsw i64 [[DIV57]], 1 // CHECK6-NEXT: [[ADD59:%.*]] = add nsw i64 0, [[MUL58]] // CHECK6-NEXT: [[CONV60:%.*]] = trunc i64 [[ADD59]] to i32 -// CHECK6-NEXT: store i32 [[CONV60]], ptr [[I46]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK6-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK6-NEXT: store i32 [[CONV60]], ptr [[I46]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK6-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CONV61:%.*]] = sext i32 [[TMP36]] to i64 -// CHECK6-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK6-NEXT: [[TMP38:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK6-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK6-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK6-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK6-NEXT: [[TMP38:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK6-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group 
[[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK6-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[SUB62:%.*]] = sub i32 [[TMP39]], [[TMP40]] // CHECK6-NEXT: [[SUB63:%.*]] = sub i32 [[SUB62]], 1 // CHECK6-NEXT: [[ADD64:%.*]] = add i32 [[SUB63]], 1 @@ -3066,8 +3066,8 @@ // CHECK6-NEXT: [[MUL66:%.*]] = mul i32 1, [[DIV65]] // CHECK6-NEXT: [[CONV67:%.*]] = zext i32 [[MUL66]] to i64 // CHECK6-NEXT: [[DIV68:%.*]] = sdiv i64 [[TMP38]], [[CONV67]] -// CHECK6-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK6-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK6-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK6-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[SUB69:%.*]] = sub i32 [[TMP41]], [[TMP42]] // CHECK6-NEXT: [[SUB70:%.*]] = sub i32 [[SUB69]], 1 // CHECK6-NEXT: [[ADD71:%.*]] = add i32 [[SUB70]], 1 @@ -3079,25 +3079,25 @@ // CHECK6-NEXT: [[MUL77:%.*]] = mul nsw i64 [[SUB76]], 1 // CHECK6-NEXT: [[ADD78:%.*]] = add nsw i64 [[CONV61]], [[MUL77]] // CHECK6-NEXT: [[CONV79:%.*]] = trunc i64 [[ADD78]] to i32 -// CHECK6-NEXT: store i32 [[CONV79]], ptr [[J47]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK6-NEXT: store i32 [[CONV79]], ptr [[J47]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE80:%.*]] // CHECK6: omp.body.continue80: // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC81:%.*]] // CHECK6: omp.inner.for.inc81: -// CHECK6-NEXT: [[TMP43:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK6-NEXT: [[TMP43:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[ADD82:%.*]] = add nsw i64 [[TMP43]], 1 -// CHECK6-NEXT: store i64 [[ADD82]], ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND48]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK6-NEXT: store i64 [[ADD82]], ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND48]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK6: omp.inner.for.end83: -// CHECK6-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK6-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[SUB84:%.*]] = sub nsw i32 [[TMP44]], 0 // CHECK6-NEXT: [[DIV85:%.*]] = sdiv i32 [[SUB84]], 1 // CHECK6-NEXT: [[MUL86:%.*]] = mul nsw i32 [[DIV85]], 1 // CHECK6-NEXT: [[ADD87:%.*]] = add nsw i32 0, [[MUL86]] // CHECK6-NEXT: store i32 [[ADD87]], ptr [[I20]], align 4 -// CHECK6-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK6-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK6-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK6-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] +// CHECK6-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] +// CHECK6-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[SUB88:%.*]] = sub i32 [[TMP46]], [[TMP47]] // CHECK6-NEXT: [[SUB89:%.*]] = 
sub i32 [[SUB88]], 1 // CHECK6-NEXT: [[ADD90:%.*]] = add i32 [[SUB89]], 1 @@ -3132,58 +3132,58 @@ // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK6-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK6-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK6-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK6-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 // CHECK6-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK6-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK6-NEXT: store ptr [[TMP]], ptr [[_TMP2]], align 8 -// CHECK6-NEXT: [[TMP1:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK6-NEXT: [[TMP1:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0 // CHECK6-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK6-NEXT: [[SUB5:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK6-NEXT: store i32 [[SUB5]], ptr [[DOTCAPTURE_EXPR_4]], align 4 // CHECK6-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CONV:%.*]] = sext i32 [[TMP3]] to i64 // CHECK6-NEXT: store i64 [[CONV]], ptr [[DOTOMP_UB]], align 8 // CHECK6-NEXT: store ptr [[A]], ptr [[_TMP6]], align 8 // CHECK6-NEXT: [[TMP4:%.*]] = load ptr, ptr [[_TMP6]], align 8 // CHECK6-NEXT: store i32 0, ptr [[TMP4]], align 4 -// CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK6-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] // CHECK6-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK6: simd.if.then: -// CHECK6-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK6-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CONV7:%.*]] = trunc i64 [[TMP6]] to i32 // CHECK6-NEXT: store i32 [[CONV7]], ptr [[DOTOMP_IV]], align 4 // CHECK6-NEXT: store ptr [[A8]], ptr [[_TMP9]], align 8 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK6: omp.inner.for.cond: -// CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] +// CHECK6-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CONV10:%.*]] = sext i32 [[TMP7]] to i64 -// CHECK6-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP12]] +// CHECK6-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[CMP11:%.*]] = icmp ule i64 [[CONV10]], [[TMP8]] // CHECK6-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK6: omp.inner.for.body: -// CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK6-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef 
[[NOUNDEF2]] // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK6-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP9]], align 8, !llvm.access.group [[ACC_GRP12]] -// CHECK6-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK6-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP9]], align 8, !llvm.access.group [[ACC_GRP13]] +// CHECK6-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK6: omp.body.continue: // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK6: omp.inner.for.inc: -// CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK6-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK6-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK6-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK6: omp.inner.for.end: // CHECK6-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 // CHECK6-NEXT: store ptr [[A13]], ptr [[_TMP14]], align 8 -// CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK6-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP12]], 0 // CHECK6-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1 // CHECK6-NEXT: [[MUL17:%.*]] = mul nsw i32 [[DIV16]], 1 @@ -3203,7 +3203,7 @@ // CHECK6-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK6-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK6-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK6-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK6-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK6-NEXT: call void @_ZN1SC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK6-NEXT: ret void // @@ -3259,22 +3259,22 @@ // CHECK7-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK7-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK7-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 // CHECK7-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK7-NEXT: store i64 9, ptr [[DOTOMP_UB]], align 8 -// CHECK7-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK7-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK7-NEXT: store i32 [[CONV]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CONV1:%.*]] = sext i32 [[TMP2]] to i64 -// CHECK7-NEXT: [[TMP3:%.*]] = load i64, ptr 
[[DOTOMP_UB]], align 8 +// CHECK7-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV1]], [[TMP3]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -3282,68 +3282,68 @@ // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1 // CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 10, ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_3]], align 4 // CHECK7-NEXT: store i64 0, ptr [[DOTOMP_LB5]], align 8 // CHECK7-NEXT: store i64 9, ptr [[DOTOMP_UB6]], align 8 -// CHECK7-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_LB5]], align 8 +// CHECK7-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_LB5]], align 8, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CONV8:%.*]] = trunc i64 [[TMP7]] to i32 // CHECK7-NEXT: store i32 [[CONV8]], ptr [[DOTOMP_IV7]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] // CHECK7: omp.inner.for.cond10: -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CONV11:%.*]] = sext i32 [[TMP8]] to i64 -// CHECK7-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB6]], align 8, !llvm.access.group [[ACC_GRP5]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB6]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP12:%.*]] = icmp ule i64 [[CONV11]], [[TMP9]] // CHECK7-NEXT: br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY13:%.*]], label [[OMP_INNER_FOR_END19:%.*]] // CHECK7: omp.inner.for.body13: -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[MUL14:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK7-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]] -// CHECK7-NEXT: store i32 [[ADD15]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK7-NEXT: store i32 [[ADD15]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] // CHECK7: omp.body.continue16: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK7: omp.inner.for.inc17: -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK7-NEXT: 
[[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK7-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK7: omp.inner.for.end19: // CHECK7-NEXT: store i32 10, ptr [[I9]], align 4 -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK7-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK7-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_21]], align 1 -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP13]], ptr [[DOTCAPTURE_EXPR_22]], align 4 -// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP14]], ptr [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP15]], ptr [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK7-NEXT: [[TMP16:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK7-NEXT: [[TMP16:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64 // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP16]], i64 [[IDXPROM]] -// CHECK7-NEXT: [[TMP18:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 -// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK7-NEXT: [[TMP18:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[IDXPROM28:%.*]] = sext i32 [[TMP19]] to i64 // CHECK7-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds i8, ptr [[TMP18]], i64 [[IDXPROM28]] -// CHECK7-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX29]], align 1 +// CHECK7-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX29]], align 1, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CONV30:%.*]] = sext i8 [[TMP20]] to i32 // CHECK7-NEXT: store i32 [[CONV30]], ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK7-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK7-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP21]], 0 // CHECK7-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK7-NEXT: [[CONV32:%.*]] = sext i32 [[DIV]] to i64 -// CHECK7-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK7-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: 
[[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[SUB33:%.*]] = sub i32 [[TMP22]], [[TMP23]] // CHECK7-NEXT: [[SUB34:%.*]] = sub i32 [[SUB33]], 1 // CHECK7-NEXT: [[ADD35:%.*]] = add i32 [[SUB34]], 1 @@ -3353,38 +3353,38 @@ // CHECK7-NEXT: [[SUB39:%.*]] = sub nsw i64 [[MUL38]], 1 // CHECK7-NEXT: store i64 [[SUB39]], ptr [[DOTCAPTURE_EXPR_31]], align 8 // CHECK7-NEXT: store i64 0, ptr [[DOTOMP_LB40]], align 8 -// CHECK7-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_31]], align 8 +// CHECK7-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_31]], align 8, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i64 [[TMP24]], ptr [[DOTOMP_UB41]], align 8 // CHECK7-NEXT: store i32 0, ptr [[I42]], align 4 -// CHECK7-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK7-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP25]], ptr [[J]], align 4 -// CHECK7-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK7-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP43:%.*]] = icmp slt i32 0, [[TMP26]] // CHECK7-NEXT: br i1 [[CMP43]], label [[LAND_LHS_TRUE:%.*]], label [[SIMD_IF_END:%.*]] // CHECK7: land.lhs.true: -// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK7-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 +// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP44:%.*]] = icmp slt i32 [[TMP27]], [[TMP28]] // CHECK7-NEXT: br i1 [[CMP44]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END]] // CHECK7: simd.if.then: -// CHECK7-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_LB40]], align 8 +// CHECK7-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_LB40]], align 8, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i64 [[TMP29]], ptr [[DOTOMP_IV45]], align 8 -// CHECK7-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK7-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK7-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP30]], i64 8) ] -// CHECK7-NEXT: [[TMP31:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_21]], align 1 +// CHECK7-NEXT: [[TMP31:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_21]], align 1, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[TOBOOL48:%.*]] = trunc i8 [[TMP31]] to i1 // CHECK7-NEXT: br i1 [[TOBOOL48]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND49:%.*]] // CHECK7: omp.inner.for.cond49: -// CHECK7-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK7-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_UB41]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK7-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_UB41]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP50:%.*]] = icmp ule i64 [[TMP32]], [[TMP33]] // CHECK7-NEXT: br i1 [[CMP50]], label [[OMP_INNER_FOR_BODY51:%.*]], label [[OMP_INNER_FOR_END84:%.*]] // CHECK7: omp.inner.for.body51: -// CHECK7-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_IV45]], 
align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK7-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK7-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK7-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[SUB52:%.*]] = sub i32 [[TMP35]], [[TMP36]] // CHECK7-NEXT: [[SUB53:%.*]] = sub i32 [[SUB52]], 1 // CHECK7-NEXT: [[ADD54:%.*]] = add i32 [[SUB53]], 1 @@ -3395,13 +3395,13 @@ // CHECK7-NEXT: [[MUL59:%.*]] = mul nsw i64 [[DIV58]], 1 // CHECK7-NEXT: [[ADD60:%.*]] = add nsw i64 0, [[MUL59]] // CHECK7-NEXT: [[CONV61:%.*]] = trunc i64 [[ADD60]] to i32 -// CHECK7-NEXT: store i32 [[CONV61]], ptr [[I46]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK7-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK7-NEXT: store i32 [[CONV61]], ptr [[I46]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK7-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CONV62:%.*]] = sext i32 [[TMP37]] to i64 -// CHECK7-NEXT: [[TMP38:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK7-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK7-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK7-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK7-NEXT: [[TMP38:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[SUB63:%.*]] = sub i32 [[TMP40]], [[TMP41]] // CHECK7-NEXT: [[SUB64:%.*]] = sub i32 [[SUB63]], 1 // CHECK7-NEXT: [[ADD65:%.*]] = add i32 [[SUB64]], 1 @@ -3409,8 +3409,8 @@ // CHECK7-NEXT: [[MUL67:%.*]] = mul i32 1, [[DIV66]] // CHECK7-NEXT: [[CONV68:%.*]] = zext i32 [[MUL67]] to i64 // CHECK7-NEXT: [[DIV69:%.*]] = sdiv i64 [[TMP39]], [[CONV68]] -// CHECK7-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK7-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK7-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[SUB70:%.*]] = sub i32 [[TMP42]], [[TMP43]] // CHECK7-NEXT: [[SUB71:%.*]] = sub i32 [[SUB70]], 1 // CHECK7-NEXT: [[ADD72:%.*]] = add i32 [[SUB71]], 1 @@ -3422,28 +3422,28 @@ // 
CHECK7-NEXT: [[MUL78:%.*]] = mul nsw i64 [[SUB77]], 1 // CHECK7-NEXT: [[ADD79:%.*]] = add nsw i64 [[CONV62]], [[MUL78]] // CHECK7-NEXT: [[CONV80:%.*]] = trunc i64 [[ADD79]] to i32 -// CHECK7-NEXT: store i32 [[CONV80]], ptr [[J47]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK7-NEXT: store i32 [[CONV80]], ptr [[J47]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE81:%.*]] // CHECK7: omp.body.continue81: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC82:%.*]] // CHECK7: omp.inner.for.inc82: -// CHECK7-NEXT: [[TMP44:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK7-NEXT: [[TMP44:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD83:%.*]] = add nsw i64 [[TMP44]], 1 -// CHECK7-NEXT: store i64 [[ADD83]], ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND49]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK7-NEXT: store i64 [[ADD83]], ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND49]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK7: omp.inner.for.end84: // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] // CHECK7: omp_if.else: // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND85:%.*]] // CHECK7: omp.inner.for.cond85: -// CHECK7-NEXT: [[TMP45:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8 -// CHECK7-NEXT: [[TMP46:%.*]] = load i64, ptr [[DOTOMP_UB41]], align 8 +// CHECK7-NEXT: [[TMP45:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP46:%.*]] = load i64, ptr [[DOTOMP_UB41]], align 8, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP86:%.*]] = icmp ule i64 [[TMP45]], [[TMP46]] // CHECK7-NEXT: br i1 [[CMP86]], label [[OMP_INNER_FOR_BODY87:%.*]], label [[OMP_INNER_FOR_END120:%.*]] // CHECK7: omp.inner.for.body87: -// CHECK7-NEXT: [[TMP47:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8 -// CHECK7-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK7-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK7-NEXT: [[TMP47:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[SUB88:%.*]] = sub i32 [[TMP48]], [[TMP49]] // CHECK7-NEXT: [[SUB89:%.*]] = sub i32 [[SUB88]], 1 // CHECK7-NEXT: [[ADD90:%.*]] = add i32 [[SUB89]], 1 @@ -3455,12 +3455,12 @@ // CHECK7-NEXT: [[ADD96:%.*]] = add nsw i64 0, [[MUL95]] // CHECK7-NEXT: [[CONV97:%.*]] = trunc i64 [[ADD96]] to i32 // CHECK7-NEXT: store i32 [[CONV97]], ptr [[I46]], align 4 -// CHECK7-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK7-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CONV98:%.*]] = sext i32 [[TMP50]] to i64 -// CHECK7-NEXT: [[TMP51:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8 -// CHECK7-NEXT: [[TMP52:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8 -// CHECK7-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK7-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK7-NEXT: [[TMP51:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP52:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !noundef [[NOUNDEF2]] +// 
CHECK7-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[SUB99:%.*]] = sub i32 [[TMP53]], [[TMP54]] // CHECK7-NEXT: [[SUB100:%.*]] = sub i32 [[SUB99]], 1 // CHECK7-NEXT: [[ADD101:%.*]] = add i32 [[SUB100]], 1 @@ -3468,8 +3468,8 @@ // CHECK7-NEXT: [[MUL103:%.*]] = mul i32 1, [[DIV102]] // CHECK7-NEXT: [[CONV104:%.*]] = zext i32 [[MUL103]] to i64 // CHECK7-NEXT: [[DIV105:%.*]] = sdiv i64 [[TMP52]], [[CONV104]] -// CHECK7-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK7-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK7-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[SUB106:%.*]] = sub i32 [[TMP55]], [[TMP56]] // CHECK7-NEXT: [[SUB107:%.*]] = sub i32 [[SUB106]], 1 // CHECK7-NEXT: [[ADD108:%.*]] = add i32 [[SUB107]], 1 @@ -3486,22 +3486,22 @@ // CHECK7: omp.body.continue117: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC118:%.*]] // CHECK7: omp.inner.for.inc118: -// CHECK7-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8 +// CHECK7-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD119:%.*]] = add nsw i64 [[TMP57]], 1 // CHECK7-NEXT: store i64 [[ADD119]], ptr [[DOTOMP_IV45]], align 8 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND85]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND85]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK7: omp.inner.for.end120: // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: -// CHECK7-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK7-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[SUB121:%.*]] = sub nsw i32 [[TMP58]], 0 // CHECK7-NEXT: [[DIV122:%.*]] = sdiv i32 [[SUB121]], 1 // CHECK7-NEXT: [[MUL123:%.*]] = mul nsw i32 [[DIV122]], 1 // CHECK7-NEXT: [[ADD124:%.*]] = add nsw i32 0, [[MUL123]] // CHECK7-NEXT: store i32 [[ADD124]], ptr [[I20]], align 4 -// CHECK7-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK7-NEXT: [[TMP60:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK7-NEXT: [[TMP61:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK7-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP60:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP61:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[SUB125:%.*]] = sub i32 [[TMP60]], [[TMP61]] // CHECK7-NEXT: [[SUB126:%.*]] = sub i32 [[SUB125]], 1 // CHECK7-NEXT: [[ADD127:%.*]] = add i32 [[SUB126]], 1 @@ -3530,7 +3530,7 @@ // CHECK7-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK7-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK7-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: call void @_ZN1SC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK7-NEXT: ret void // @@ -3556,58 +3556,58 @@ // CHECK7-NEXT: store ptr 
[[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK7-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK7-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 // CHECK7-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK7-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK7-NEXT: store ptr [[TMP]], ptr [[_TMP2]], align 8 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0 // CHECK7-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK7-NEXT: [[SUB5:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK7-NEXT: store i32 [[SUB5]], ptr [[DOTCAPTURE_EXPR_4]], align 4 // CHECK7-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CONV:%.*]] = sext i32 [[TMP3]] to i64 // CHECK7-NEXT: store i64 [[CONV]], ptr [[DOTOMP_UB]], align 8 // CHECK7-NEXT: store ptr [[A]], ptr [[_TMP6]], align 8 // CHECK7-NEXT: [[TMP4:%.*]] = load ptr, ptr [[_TMP6]], align 8 // CHECK7-NEXT: store i32 0, ptr [[TMP4]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] // CHECK7-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK7: simd.if.then: -// CHECK7-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK7-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CONV7:%.*]] = trunc i64 [[TMP6]] to i32 // CHECK7-NEXT: store i32 [[CONV7]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: store ptr [[A8]], ptr [[_TMP9]], align 8 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CONV10:%.*]] = sext i32 [[TMP7]] to i64 -// CHECK7-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP14]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP11:%.*]] = icmp ule i64 [[CONV10]], [[TMP8]] // CHECK7-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// 
CHECK7-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP9]], align 8, !llvm.access.group [[ACC_GRP14]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK7-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP9]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK7-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 // CHECK7-NEXT: store ptr [[A13]], ptr [[_TMP14]], align 8 -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP12]], 0 // CHECK7-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1 // CHECK7-NEXT: [[MUL17:%.*]] = mul nsw i32 [[DIV16]], 1 @@ -3663,22 +3663,22 @@ // CHECK8-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK8-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK8-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK8-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK8-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK8-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 // CHECK8-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK8-NEXT: store i64 9, ptr [[DOTOMP_UB]], align 8 -// CHECK8-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK8-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK8-NEXT: store i32 [[CONV]], ptr [[DOTOMP_IV]], align 4 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK8: omp.inner.for.cond: -// CHECK8-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK8-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CONV1:%.*]] = sext i32 [[TMP2]] to i64 -// CHECK8-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK8-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV1]], [[TMP3]] // CHECK8-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK8: omp.inner.for.body: -// CHECK8-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK8-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 // CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK8-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -3686,68 
+3686,68 @@ // CHECK8: omp.body.continue: // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK8: omp.inner.for.inc: -// CHECK8-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK8-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1 // CHECK8-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4 -// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]] +// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] // CHECK8: omp.inner.for.end: // CHECK8-NEXT: store i32 10, ptr [[I]], align 4 -// CHECK8-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK8-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_3]], align 4 // CHECK8-NEXT: store i64 0, ptr [[DOTOMP_LB5]], align 8 // CHECK8-NEXT: store i64 9, ptr [[DOTOMP_UB6]], align 8 -// CHECK8-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_LB5]], align 8 +// CHECK8-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_LB5]], align 8, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CONV8:%.*]] = trunc i64 [[TMP7]] to i32 // CHECK8-NEXT: store i32 [[CONV8]], ptr [[DOTOMP_IV7]], align 4 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] // CHECK8: omp.inner.for.cond10: -// CHECK8-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]] +// CHECK8-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CONV11:%.*]] = sext i32 [[TMP8]] to i64 -// CHECK8-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB6]], align 8, !llvm.access.group [[ACC_GRP5]] +// CHECK8-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB6]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CMP12:%.*]] = icmp ule i64 [[CONV11]], [[TMP9]] // CHECK8-NEXT: br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY13:%.*]], label [[OMP_INNER_FOR_END19:%.*]] // CHECK8: omp.inner.for.body13: -// CHECK8-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK8-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[MUL14:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK8-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]] -// CHECK8-NEXT: store i32 [[ADD15]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK8-NEXT: store i32 [[ADD15]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] // CHECK8: omp.body.continue16: // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK8: omp.inner.for.inc17: -// CHECK8-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK8-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK8-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP5]] -// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK8-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV7]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK8: omp.inner.for.end19: // CHECK8-NEXT: store i32 10, ptr [[I9]], align 4 -// CHECK8-NEXT: [[TMP12:%.*]] = 
load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK8-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK8-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK8-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_21]], align 1 -// CHECK8-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK8-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: store i32 [[TMP13]], ptr [[DOTCAPTURE_EXPR_22]], align 4 -// CHECK8-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK8-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: store i32 [[TMP14]], ptr [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK8-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK8-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: store i32 [[TMP15]], ptr [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK8-NEXT: [[TMP16:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK8-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK8-NEXT: [[TMP16:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64 // CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP16]], i64 [[IDXPROM]] -// CHECK8-NEXT: [[TMP18:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 -// CHECK8-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK8-NEXT: [[TMP18:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8, !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[IDXPROM28:%.*]] = sext i32 [[TMP19]] to i64 // CHECK8-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds i8, ptr [[TMP18]], i64 [[IDXPROM28]] -// CHECK8-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX29]], align 1 +// CHECK8-NEXT: [[TMP20:%.*]] = load i8, ptr [[ARRAYIDX29]], align 1, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CONV30:%.*]] = sext i8 [[TMP20]] to i32 // CHECK8-NEXT: store i32 [[CONV30]], ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK8-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK8-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP21]], 0 // CHECK8-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK8-NEXT: [[CONV32:%.*]] = sext i32 [[DIV]] to i64 -// CHECK8-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK8-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK8-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[SUB33:%.*]] = sub i32 [[TMP22]], [[TMP23]] // CHECK8-NEXT: [[SUB34:%.*]] = sub i32 [[SUB33]], 1 // CHECK8-NEXT: [[ADD35:%.*]] = add i32 [[SUB34]], 1 @@ -3757,38 +3757,38 @@ // CHECK8-NEXT: [[SUB39:%.*]] = sub nsw i64 [[MUL38]], 1 // CHECK8-NEXT: store i64 [[SUB39]], ptr [[DOTCAPTURE_EXPR_31]], align 8 // CHECK8-NEXT: store i64 0, ptr [[DOTOMP_LB40]], align 8 -// CHECK8-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_31]], align 8 +// CHECK8-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_31]], align 8, !noundef [[NOUNDEF2]] 
// CHECK8-NEXT: store i64 [[TMP24]], ptr [[DOTOMP_UB41]], align 8 // CHECK8-NEXT: store i32 0, ptr [[I42]], align 4 -// CHECK8-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK8-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: store i32 [[TMP25]], ptr [[J]], align 4 -// CHECK8-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK8-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CMP43:%.*]] = icmp slt i32 0, [[TMP26]] // CHECK8-NEXT: br i1 [[CMP43]], label [[LAND_LHS_TRUE:%.*]], label [[SIMD_IF_END:%.*]] // CHECK8: land.lhs.true: -// CHECK8-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK8-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 +// CHECK8-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CMP44:%.*]] = icmp slt i32 [[TMP27]], [[TMP28]] // CHECK8-NEXT: br i1 [[CMP44]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END]] // CHECK8: simd.if.then: -// CHECK8-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_LB40]], align 8 +// CHECK8-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_LB40]], align 8, !noundef [[NOUNDEF2]] // CHECK8-NEXT: store i64 [[TMP29]], ptr [[DOTOMP_IV45]], align 8 -// CHECK8-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK8-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK8-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[TMP30]], i64 8) ] -// CHECK8-NEXT: [[TMP31:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_21]], align 1 +// CHECK8-NEXT: [[TMP31:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_21]], align 1, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[TOBOOL48:%.*]] = trunc i8 [[TMP31]] to i1 // CHECK8-NEXT: br i1 [[TOBOOL48]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK8: omp_if.then: // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND49:%.*]] // CHECK8: omp.inner.for.cond49: -// CHECK8-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK8-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_UB41]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK8-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_UB41]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CMP50:%.*]] = icmp ule i64 [[TMP32]], [[TMP33]] // CHECK8-NEXT: br i1 [[CMP50]], label [[OMP_INNER_FOR_BODY51:%.*]], label [[OMP_INNER_FOR_END84:%.*]] // CHECK8: omp.inner.for.body51: -// CHECK8-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK8-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK8-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK8-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], 
!noundef [[NOUNDEF2]] // CHECK8-NEXT: [[SUB52:%.*]] = sub i32 [[TMP35]], [[TMP36]] // CHECK8-NEXT: [[SUB53:%.*]] = sub i32 [[SUB52]], 1 // CHECK8-NEXT: [[ADD54:%.*]] = add i32 [[SUB53]], 1 @@ -3799,13 +3799,13 @@ // CHECK8-NEXT: [[MUL59:%.*]] = mul nsw i64 [[DIV58]], 1 // CHECK8-NEXT: [[ADD60:%.*]] = add nsw i64 0, [[MUL59]] // CHECK8-NEXT: [[CONV61:%.*]] = trunc i64 [[ADD60]] to i32 -// CHECK8-NEXT: store i32 [[CONV61]], ptr [[I46]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK8-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK8-NEXT: store i32 [[CONV61]], ptr [[I46]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK8-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CONV62:%.*]] = sext i32 [[TMP37]] to i64 -// CHECK8-NEXT: [[TMP38:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK8-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK8-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK8-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK8-NEXT: [[TMP38:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[SUB63:%.*]] = sub i32 [[TMP40]], [[TMP41]] // CHECK8-NEXT: [[SUB64:%.*]] = sub i32 [[SUB63]], 1 // CHECK8-NEXT: [[ADD65:%.*]] = add i32 [[SUB64]], 1 @@ -3813,8 +3813,8 @@ // CHECK8-NEXT: [[MUL67:%.*]] = mul i32 1, [[DIV66]] // CHECK8-NEXT: [[CONV68:%.*]] = zext i32 [[MUL67]] to i64 // CHECK8-NEXT: [[DIV69:%.*]] = sdiv i64 [[TMP39]], [[CONV68]] -// CHECK8-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK8-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK8-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[SUB70:%.*]] = sub i32 [[TMP42]], [[TMP43]] // CHECK8-NEXT: [[SUB71:%.*]] = sub i32 [[SUB70]], 1 // CHECK8-NEXT: [[ADD72:%.*]] = add i32 [[SUB71]], 1 @@ -3826,28 +3826,28 @@ // CHECK8-NEXT: [[MUL78:%.*]] = mul nsw i64 [[SUB77]], 1 // CHECK8-NEXT: [[ADD79:%.*]] = add nsw i64 [[CONV62]], [[MUL78]] // CHECK8-NEXT: [[CONV80:%.*]] = trunc i64 [[ADD79]] to i32 -// CHECK8-NEXT: store i32 [[CONV80]], ptr [[J47]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK8-NEXT: store i32 [[CONV80]], ptr [[J47]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE81:%.*]] // CHECK8: omp.body.continue81: // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC82:%.*]] // CHECK8: omp.inner.for.inc82: -// CHECK8-NEXT: [[TMP44:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] +// 
CHECK8-NEXT: [[TMP44:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[ADD83:%.*]] = add nsw i64 [[TMP44]], 1 -// CHECK8-NEXT: store i64 [[ADD83]], ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND49]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK8-NEXT: store i64 [[ADD83]], ptr [[DOTOMP_IV45]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND49]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK8: omp.inner.for.end84: // CHECK8-NEXT: br label [[OMP_IF_END:%.*]] // CHECK8: omp_if.else: // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND85:%.*]] // CHECK8: omp.inner.for.cond85: -// CHECK8-NEXT: [[TMP45:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8 -// CHECK8-NEXT: [[TMP46:%.*]] = load i64, ptr [[DOTOMP_UB41]], align 8 +// CHECK8-NEXT: [[TMP45:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP46:%.*]] = load i64, ptr [[DOTOMP_UB41]], align 8, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CMP86:%.*]] = icmp ule i64 [[TMP45]], [[TMP46]] // CHECK8-NEXT: br i1 [[CMP86]], label [[OMP_INNER_FOR_BODY87:%.*]], label [[OMP_INNER_FOR_END120:%.*]] // CHECK8: omp.inner.for.body87: -// CHECK8-NEXT: [[TMP47:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8 -// CHECK8-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK8-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK8-NEXT: [[TMP47:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[SUB88:%.*]] = sub i32 [[TMP48]], [[TMP49]] // CHECK8-NEXT: [[SUB89:%.*]] = sub i32 [[SUB88]], 1 // CHECK8-NEXT: [[ADD90:%.*]] = add i32 [[SUB89]], 1 @@ -3859,12 +3859,12 @@ // CHECK8-NEXT: [[ADD96:%.*]] = add nsw i64 0, [[MUL95]] // CHECK8-NEXT: [[CONV97:%.*]] = trunc i64 [[ADD96]] to i32 // CHECK8-NEXT: store i32 [[CONV97]], ptr [[I46]], align 4 -// CHECK8-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK8-NEXT: [[TMP50:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CONV98:%.*]] = sext i32 [[TMP50]] to i64 -// CHECK8-NEXT: [[TMP51:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8 -// CHECK8-NEXT: [[TMP52:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8 -// CHECK8-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK8-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK8-NEXT: [[TMP51:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP52:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[SUB99:%.*]] = sub i32 [[TMP53]], [[TMP54]] // CHECK8-NEXT: [[SUB100:%.*]] = sub i32 [[SUB99]], 1 // CHECK8-NEXT: [[ADD101:%.*]] = add i32 [[SUB100]], 1 @@ -3872,8 +3872,8 @@ // CHECK8-NEXT: [[MUL103:%.*]] = mul i32 1, [[DIV102]] // CHECK8-NEXT: [[CONV104:%.*]] = zext i32 [[MUL103]] to i64 // CHECK8-NEXT: [[DIV105:%.*]] = sdiv i64 [[TMP52]], [[CONV104]] -// CHECK8-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 
-// CHECK8-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK8-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[SUB106:%.*]] = sub i32 [[TMP55]], [[TMP56]] // CHECK8-NEXT: [[SUB107:%.*]] = sub i32 [[SUB106]], 1 // CHECK8-NEXT: [[ADD108:%.*]] = add i32 [[SUB107]], 1 @@ -3890,22 +3890,22 @@ // CHECK8: omp.body.continue117: // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC118:%.*]] // CHECK8: omp.inner.for.inc118: -// CHECK8-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8 +// CHECK8-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_IV45]], align 8, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[ADD119:%.*]] = add nsw i64 [[TMP57]], 1 // CHECK8-NEXT: store i64 [[ADD119]], ptr [[DOTOMP_IV45]], align 8 -// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND85]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND85]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK8: omp.inner.for.end120: // CHECK8-NEXT: br label [[OMP_IF_END]] // CHECK8: omp_if.end: -// CHECK8-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK8-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_25]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[SUB121:%.*]] = sub nsw i32 [[TMP58]], 0 // CHECK8-NEXT: [[DIV122:%.*]] = sdiv i32 [[SUB121]], 1 // CHECK8-NEXT: [[MUL123:%.*]] = mul nsw i32 [[DIV122]], 1 // CHECK8-NEXT: [[ADD124:%.*]] = add nsw i32 0, [[MUL123]] // CHECK8-NEXT: store i32 [[ADD124]], ptr [[I20]], align 4 -// CHECK8-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK8-NEXT: [[TMP60:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK8-NEXT: [[TMP61:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK8-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP60:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_27]], align 4, !noundef [[NOUNDEF2]] +// CHECK8-NEXT: [[TMP61:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_26]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[SUB125:%.*]] = sub i32 [[TMP60]], [[TMP61]] // CHECK8-NEXT: [[SUB126:%.*]] = sub i32 [[SUB125]], 1 // CHECK8-NEXT: [[ADD127:%.*]] = add i32 [[SUB126]], 1 @@ -3940,58 +3940,58 @@ // CHECK8-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK8-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK8-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK8-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK8-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP0]], 0 // CHECK8-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK8-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK8-NEXT: store ptr [[TMP]], ptr [[_TMP2]], align 8 -// CHECK8-NEXT: [[TMP1:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK8-NEXT: [[TMP1:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK8-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK8-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0 // CHECK8-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK8-NEXT: [[SUB5:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK8-NEXT: store i32 [[SUB5]], ptr 
[[DOTCAPTURE_EXPR_4]], align 4 // CHECK8-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK8-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK8-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CONV:%.*]] = sext i32 [[TMP3]] to i64 // CHECK8-NEXT: store i64 [[CONV]], ptr [[DOTOMP_UB]], align 8 // CHECK8-NEXT: store ptr [[A]], ptr [[_TMP6]], align 8 // CHECK8-NEXT: [[TMP4:%.*]] = load ptr, ptr [[_TMP6]], align 8 // CHECK8-NEXT: store i32 0, ptr [[TMP4]], align 4 -// CHECK8-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK8-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] // CHECK8-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK8: simd.if.then: -// CHECK8-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK8-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CONV7:%.*]] = trunc i64 [[TMP6]] to i32 // CHECK8-NEXT: store i32 [[CONV7]], ptr [[DOTOMP_IV]], align 4 // CHECK8-NEXT: store ptr [[A8]], ptr [[_TMP9]], align 8 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK8: omp.inner.for.cond: -// CHECK8-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]] +// CHECK8-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CONV10:%.*]] = sext i32 [[TMP7]] to i64 -// CHECK8-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP14]] +// CHECK8-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[CMP11:%.*]] = icmp ule i64 [[CONV10]], [[TMP8]] // CHECK8-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK8: omp.inner.for.body: -// CHECK8-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK8-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK8-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP9]], align 8, !llvm.access.group [[ACC_GRP14]] -// CHECK8-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK8-NEXT: [[TMP10:%.*]] = load ptr, ptr [[_TMP9]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK8-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK8: omp.body.continue: // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK8: omp.inner.for.inc: -// CHECK8-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK8-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK8-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] -// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK8-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], 
!llvm.loop [[LOOP16:![0-9]+]] // CHECK8: omp.inner.for.end: // CHECK8-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 // CHECK8-NEXT: store ptr [[A13]], ptr [[_TMP14]], align 8 -// CHECK8-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK8-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP12]], 0 // CHECK8-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1 // CHECK8-NEXT: [[MUL17:%.*]] = mul nsw i32 [[DIV16]], 1 @@ -4011,7 +4011,7 @@ // CHECK8-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK8-NEXT: store i32 [[C]], ptr [[C_ADDR]], align 4 // CHECK8-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK8-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4 +// CHECK8-NEXT: [[TMP0:%.*]] = load i32, ptr [[C_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK8-NEXT: call void @_ZN1SC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK8-NEXT: ret void // diff --git a/clang/test/OpenMP/parallel_master_taskloop_simd_lastprivate_codegen.cpp b/clang/test/OpenMP/parallel_master_taskloop_simd_lastprivate_codegen.cpp --- a/clang/test/OpenMP/parallel_master_taskloop_simd_lastprivate_codegen.cpp +++ b/clang/test/OpenMP/parallel_master_taskloop_simd_lastprivate_codegen.cpp @@ -255,7 +255,7 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store double [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: call void @_ZN1SIdEC2Ed(ptr noundef nonnull align 8 dereferenceable(8) [[THIS1]], double noundef [[TMP0]]) // CHECK1-NEXT: ret void // @@ -282,7 +282,7 @@ // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[S_ARR_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[VAR_ADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP5]]) // CHECK1-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP6]], 0 // CHECK1-NEXT: br i1 [[TMP7]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -301,7 +301,7 @@ // CHECK1-NEXT: [[TMP13:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP5]], i32 9, i64 120, i64 40, ptr @.omp_task_entry.) 
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP13]], i32 0, i32 0 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP14]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP16]], ptr align 8 [[AGG_CAPTURED]], i64 40, i1 false) // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP13]], i32 0, i32 1 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP17]], i32 0, i32 0 @@ -327,7 +327,7 @@ // CHECK1-NEXT: store i64 1, ptr [[TMP23]], align 8 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP14]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP24]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[TMP23]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP5]], ptr [[TMP13]], i32 1, ptr [[TMP21]], ptr [[TMP22]], i64 [[TMP25]], i32 1, i32 0, i64 0, ptr @.omp_task_dup.) // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP5]]) // CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP5]]) @@ -395,42 +395,42 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef 
[[NOUNDEF3]] // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr 
[[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 // CHECK1-NEXT: call void [[TMP20]](ptr [[TMP21]], ptr [[DOTLASTPRIV_PTR_ADDR_I]], ptr [[DOTLASTPRIV_PTR_ADDR1_I]], ptr [[DOTLASTPRIV_PTR_ADDR2_I]], ptr [[DOTLASTPRIV_PTR_ADDR3_I]], ptr [[DOTLASTPRIV_PTR_ADDR4_I]]) #[[ATTR4]] // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP19]], i32 0, i32 3 // CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 8 @@ -445,39 +445,39 @@ // CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP31]], align 8 // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP19]], i32 0, i32 4 // CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR2_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR3_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR4_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP40:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR2_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR3_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR4_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP40:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP40]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15:![0-9]+]] +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV5_I:%.*]] = sext i32 [[TMP41]] to i64 -// CHECK1-NEXT: [[TMP42:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK1-NEXT: [[TMP42:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV5_I]], [[TMP42]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: store i32 [[TMP43]], ptr [[I_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[TMP36]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: store i32 [[TMP44]], ptr [[TMP38]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: call void 
@llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP37]], ptr align 8 [[TMP35]], i64 8, i1 false), !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: store i32 33, ptr [[TMP39]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP43]], ptr [[I_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[TMP36]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP44]], ptr [[TMP38]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[TMP37]], ptr align 8 [[TMP35]], i64 8, i1 false), !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: store i32 33, ptr [[TMP39]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD7_I:%.*]] = add nsw i32 [[TMP45]], 1 -// CHECK1-NEXT: store i32 [[ADD7_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD7_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK1: omp.inner.for.end.i: -// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP47:%.*]] = icmp ne i32 [[TMP46]], 0 // CHECK1-NEXT: br i1 [[TMP47]], label [[DOTOMP_LASTPRIVATE_THEN_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK1: .omp.lastprivate.then.i: // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP23]], ptr align 8 [[TMP35]], i64 8, i1 false) -// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[TMP36]], align 4 +// CHECK1-NEXT: [[TMP48:%.*]] = load i32, ptr [[TMP36]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP48]], ptr [[TMP25]], align 4 // CHECK1-NEXT: [[TMP49:%.*]] = getelementptr [[STRUCT_S:%.*]], ptr [[TMP27]], i64 2 // CHECK1-NEXT: br label [[OMP_ARRAYCPY_BODY_I:%.*]] @@ -491,7 +491,7 @@ // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE_I]], label [[OMP_ARRAYCPY_DONE8_I:%.*]], label [[OMP_ARRAYCPY_BODY_I]] // CHECK1: omp.arraycpy.done8.i: // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP28]], ptr align 4 [[TMP38]], i64 8, i1 false) -// CHECK1-NEXT: [[TMP50:%.*]] = load i32, ptr [[TMP39]], align 4 +// CHECK1-NEXT: [[TMP50:%.*]] = load i32, ptr [[TMP39]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP50]], ptr [[TMP34]], align 4 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK1: .omp_outlined..1.exit: @@ -510,7 +510,7 @@ // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr 
[[DOTADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP7]], i32 0, i32 0 @@ -624,7 +624,7 @@ // CHECK1-NEXT: store double [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store double [[TMP0]], ptr [[F]], align 8 // CHECK1-NEXT: ret void // @@ -656,7 +656,7 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @_ZN1SIiEC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK1-NEXT: ret void // @@ -683,7 +683,7 @@ // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[S_ARR_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[VAR_ADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP5]]) // CHECK1-NEXT: [[TMP7:%.*]] = icmp ne i32 [[TMP6]], 0 // CHECK1-NEXT: br i1 [[TMP7]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -700,7 +700,7 @@ // CHECK1-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP5]], i32 9, i64 256, i64 32, ptr @.omp_task_entry..5) // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], ptr [[TMP12]], i32 0, i32 0 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP13]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 128 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 128, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[AGG_CAPTURED]], i64 32, i1 false) // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], ptr [[TMP12]], i32 0, i32 2 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], ptr [[TMP16]], i32 0, i32 2 @@ -726,7 +726,7 @@ // CHECK1-NEXT: store i64 1, ptr [[TMP22]], align 8 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP13]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP23]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP24:%.*]] = load i64, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = load i64, ptr [[TMP22]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP5]], ptr [[TMP12]], i32 1, ptr [[TMP20]], ptr [[TMP21]], i64 [[TMP24]], i32 1, i32 0, i64 0, ptr @.omp_task_dup..6) // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP5]]) // 
CHECK1-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP5]]) @@ -788,42 +788,42 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], ptr [[TMP3]], i32 0, i32 2 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 16 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 16, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 64 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 64, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META28:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: store ptr @.omp_task_privates_map..4, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: store i32 [[TMP16]], ptr 
[[DOTLITER__ADDR_I]], align 4, !noalias !32 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !32 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META27:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META29:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META31:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !33 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: store ptr @.omp_task_privates_map..4, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !33 +// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !33 // CHECK1-NEXT: call void [[TMP20]](ptr [[TMP21]], ptr [[DOTLASTPRIV_PTR_ADDR_I]], ptr [[DOTLASTPRIV_PTR_ADDR1_I]], ptr [[DOTLASTPRIV_PTR_ADDR2_I]], ptr [[DOTLASTPRIV_PTR_ADDR3_I]]) #[[ATTR4]] // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], ptr [[TMP19]], i32 0, i32 1 // CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 8 @@ -836,36 +836,36 @@ // CHECK1-NEXT: [[TMP30:%.*]] = load ptr, ptr [[TMP29]], align 8 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], ptr [[TMP19]], i32 0, i32 3 // CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !32 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR2_I]], align 8, !noalias !32 -// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR3_I]], align 8, !noalias !32 -// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !32 +// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr 
[[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !33 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR2_I]], align 8, !noalias !33 +// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR3_I]], align 8, !noalias !33 +// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !33, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP37]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !32 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !33 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33:![0-9]+]] +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !33, !llvm.access.group [[ACC_GRP34:![0-9]+]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV4_I:%.*]] = sext i32 [[TMP38]] to i64 -// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !32, !llvm.access.group [[ACC_GRP33]] +// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !33, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV4_I]], [[TMP39]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]] -// CHECK1-NEXT: store i32 [[TMP40]], ptr [[I_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]] -// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP33]], align 128, !llvm.access.group [[ACC_GRP33]] -// CHECK1-NEXT: store i32 [[TMP41]], ptr [[TMP34]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP35]], ptr align 4 [[TMP36]], i64 4, i1 false), !llvm.access.group [[ACC_GRP33]] -// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]] +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !33, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP40]], ptr [[I_I]], align 4, !noalias !33, !llvm.access.group [[ACC_GRP34]] +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP33]], align 128, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP41]], ptr [[TMP34]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP35]], ptr align 4 [[TMP36]], i64 4, i1 false), !llvm.access.group [[ACC_GRP34]] +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !33, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD6_I:%.*]] = add nsw i32 [[TMP42]], 1 -// CHECK1-NEXT: store i32 [[ADD6_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP34:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD6_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !33, !llvm.access.group [[ACC_GRP34]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP35:![0-9]+]] // CHECK1: omp.inner.for.end.i: -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !32 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr 
[[DOTLITER__ADDR_I]], align 4, !noalias !33, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 // CHECK1-NEXT: br i1 [[TMP44]], label [[DOTOMP_LASTPRIVATE_THEN_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK1: .omp.lastprivate.then.i: -// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[TMP33]], align 128 +// CHECK1-NEXT: [[TMP45:%.*]] = load i32, ptr [[TMP33]], align 128, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP45]], ptr [[TMP23]], align 128 // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP24]], ptr align 4 [[TMP34]], i64 8, i1 false) // CHECK1-NEXT: [[TMP46:%.*]] = getelementptr [[STRUCT_S_0:%.*]], ptr [[TMP26]], i64 2 @@ -897,7 +897,7 @@ // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 64 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], ptr [[TMP3]], i32 0, i32 2 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], ptr [[TMP7]], i32 0, i32 2 @@ -973,7 +973,7 @@ // CHECK1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[F]], align 4 // CHECK1-NEXT: ret void // @@ -1007,7 +1007,7 @@ // CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]]) // CHECK3-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK3-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -1020,7 +1020,7 @@ // CHECK3-NEXT: [[TMP6:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i64 96, i64 16, ptr @.omp_task_entry.) 
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP6]], i32 0, i32 0 // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP9]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP6]], i32 0, i32 1 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP7]], i32 0, i32 5 @@ -1031,7 +1031,7 @@ // CHECK3-NEXT: store i64 1, ptr [[TMP13]], align 8 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP7]], i32 0, i32 9 // CHECK3-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP14]], i8 0, i64 8, i1 false) -// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP6]], i32 1, ptr [[TMP11]], ptr [[TMP12]], i64 [[TMP15]], i32 1, i32 0, i64 0, ptr @.omp_task_dup.) // CHECK3-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK3-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP1]]) @@ -1082,79 +1082,79 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // 
CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 +// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: [[TMP20:%.*]] = load ptr, ptr 
[[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 // CHECK3-NEXT: call void [[TMP20]](ptr [[TMP21]], ptr [[DOTLASTPRIV_PTR_ADDR_I]], ptr [[DOTLASTPRIV_PTR_ADDR1_I]]) #[[ATTR3:[0-9]+]] // CHECK3-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP19]], align 8 // CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP19]], i32 0, i32 1 // CHECK3-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK3-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !14 -// CHECK3-NEXT: [[TMP26:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !14 -// CHECK3-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK3-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !15 +// CHECK3-NEXT: [[TMP26:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !15 +// CHECK3-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP27]] to i32 -// CHECK3-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK3-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK3: omp.inner.for.cond.i: -// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15:![0-9]+]] +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV2_I:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK3-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK3-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP29]] // CHECK3-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK3: omp.inner.for.body.i: -// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: store i32 [[TMP30]], ptr [[I_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: store double 1.000000e+00, ptr [[TMP25]], align 8, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: store i32 11, ptr [[TMP26]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: store ptr [[TMP25]], ptr [[REF_TMP_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] +// CHECK3-NEXT: store i32 [[TMP30]], ptr [[I_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK3-NEXT: store double 1.000000e+00, ptr [[TMP25]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK3-NEXT: store i32 11, ptr [[TMP26]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK3-NEXT: store ptr [[TMP25]], ptr [[REF_TMP_I]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]] // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP_I]], i32 0, i32 1 -// CHECK3-NEXT: store ptr [[TMP26]], ptr [[TMP31]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: call void 
@"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(16) [[REF_TMP_I]]), !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK3-NEXT: store ptr [[TMP26]], ptr [[TMP31]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(16) [[REF_TMP_I]]), !llvm.access.group [[ACC_GRP16]] +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[ADD3_I:%.*]] = add nsw i32 [[TMP32]], 1 -// CHECK3-NEXT: store i32 [[ADD3_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD3_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK3: omp.inner.for.end.i: -// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 +// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK3-NEXT: br i1 [[TMP34]], label [[DOTOMP_LASTPRIVATE_THEN_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK3: .omp.lastprivate.then.i: -// CHECK3-NEXT: [[TMP35:%.*]] = load double, ptr [[TMP25]], align 8 +// CHECK3-NEXT: [[TMP35:%.*]] = load double, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store volatile double [[TMP35]], ptr [[TMP22]], align 8 -// CHECK3-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP26]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP36]], ptr [[TMP24]], align 4 // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK3: .omp_outlined..1.exit: @@ -1173,7 +1173,7 @@ // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 8 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK3-NEXT: ret void @@ -1210,7 +1210,7 @@ // CHECK4-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK4-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK4-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK4-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK4-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK4-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK4-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK4-NEXT: br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -1223,7 +1223,7 @@ // CHECK4-NEXT: [[TMP6:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i64 96, i64 
16, ptr @.omp_task_entry.) // CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP6]], i32 0, i32 0 // CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP9]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP6]], i32 0, i32 1 // CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP7]], i32 0, i32 5 @@ -1234,7 +1234,7 @@ // CHECK4-NEXT: store i64 1, ptr [[TMP13]], align 8 // CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP7]], i32 0, i32 9 // CHECK4-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP14]], i8 0, i64 8, i1 false) -// CHECK4-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK4-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP6]], i32 1, ptr [[TMP11]], ptr [[TMP12]], i64 [[TMP15]], i32 1, i32 0, i64 0, ptr @.omp_task_dup.) // CHECK4-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP1]]) // CHECK4-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP1]]) @@ -1299,93 +1299,93 @@ // CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK4-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK4-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK4-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK4-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK4-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK4-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, 
!noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK4-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK4-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 +// CHECK4-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK4-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: [[TMP20:%.*]] = load 
ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 // CHECK4-NEXT: call void [[TMP20]](ptr [[TMP21]], ptr [[DOTLASTPRIV_PTR_ADDR_I]], ptr [[DOTLASTPRIV_PTR_ADDR1_I]]) #[[ATTR4:[0-9]+]] // CHECK4-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP19]], align 8 // CHECK4-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP19]], i32 0, i32 1 // CHECK4-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK4-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !14 -// CHECK4-NEXT: [[TMP26:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !14 -// CHECK4-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK4-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !15 +// CHECK4-NEXT: [[TMP26:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !15 +// CHECK4-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP27]] to i32 -// CHECK4-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK4-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK4: omp.inner.for.cond.i: -// CHECK4-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15:![0-9]+]] +// CHECK4-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[CONV2_I:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK4-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK4-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP29]] // CHECK4-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK4: omp.inner.for.body.i: -// CHECK4-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK4-NEXT: store i32 [[TMP30]], ptr [[I_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK4-NEXT: store double 1.000000e+00, ptr [[TMP25]], align 8, !llvm.access.group [[ACC_GRP15]] -// CHECK4-NEXT: store i32 11, ptr [[TMP26]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK4-NEXT: store ptr @_NSConcreteStackBlock, ptr [[BLOCK_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK4-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] +// CHECK4-NEXT: store i32 [[TMP30]], ptr [[I_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK4-NEXT: store double 1.000000e+00, ptr [[TMP25]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK4-NEXT: store i32 11, ptr [[TMP26]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK4-NEXT: store ptr @_NSConcreteStackBlock, ptr [[BLOCK_I]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]] // CHECK4-NEXT: [[BLOCK_FLAGS_I:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK_I]], i32 0, i32 1 -// CHECK4-NEXT: store i32 1073741824, ptr [[BLOCK_FLAGS_I]], align 8, !noalias !14, 
!llvm.access.group [[ACC_GRP15]] +// CHECK4-NEXT: store i32 1073741824, ptr [[BLOCK_FLAGS_I]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]] // CHECK4-NEXT: [[BLOCK_RESERVED_I:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK_I]], i32 0, i32 2 -// CHECK4-NEXT: store i32 0, ptr [[BLOCK_RESERVED_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK4-NEXT: store i32 0, ptr [[BLOCK_RESERVED_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] // CHECK4-NEXT: [[BLOCK_INVOKE_I:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK_I]], i32 0, i32 3 -// CHECK4-NEXT: store ptr @_block_invoke, ptr [[BLOCK_INVOKE_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK4-NEXT: store ptr @_block_invoke, ptr [[BLOCK_INVOKE_I]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]] // CHECK4-NEXT: [[BLOCK_DESCRIPTOR_I:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK_I]], i32 0, i32 4 -// CHECK4-NEXT: store ptr @__block_descriptor_tmp.2, ptr [[BLOCK_DESCRIPTOR_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK4-NEXT: store ptr @__block_descriptor_tmp.2, ptr [[BLOCK_DESCRIPTOR_I]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]] // CHECK4-NEXT: [[BLOCK_CAPTURED_I:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK_I]], i32 0, i32 5 -// CHECK4-NEXT: [[TMP31:%.*]] = load volatile double, ptr [[TMP25]], align 8, !llvm.access.group [[ACC_GRP15]] -// CHECK4-NEXT: store volatile double [[TMP31]], ptr [[BLOCK_CAPTURED_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK4-NEXT: [[TMP31:%.*]] = load volatile double, ptr [[TMP25]], align 8, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] +// CHECK4-NEXT: store volatile double [[TMP31]], ptr [[BLOCK_CAPTURED_I]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]] // CHECK4-NEXT: [[BLOCK_CAPTURED3_I:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK_I]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP26]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK4-NEXT: store i32 [[TMP32]], ptr [[BLOCK_CAPTURED3_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK4-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP26]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] +// CHECK4-NEXT: store i32 [[TMP32]], ptr [[BLOCK_CAPTURED3_I]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]] // CHECK4-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], ptr [[BLOCK_I]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK4-NEXT: call void [[TMP34]](ptr noundef [[BLOCK_I]]) #[[ATTR4]], !llvm.access.group [[ACC_GRP15]] -// CHECK4-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK4-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK4-NEXT: call void [[TMP34]](ptr noundef [[BLOCK_I]]) #[[ATTR4]], !llvm.access.group [[ACC_GRP16]] +// CHECK4-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[ADD4_I:%.*]] = add nsw i32 [[TMP35]], 1 -// CHECK4-NEXT: store i32 [[ADD4_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14, 
!llvm.access.group [[ACC_GRP15]] -// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK4-NEXT: store i32 [[ADD4_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK4: omp.inner.for.end.i: -// CHECK4-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 +// CHECK4-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP37:%.*]] = icmp ne i32 [[TMP36]], 0 // CHECK4-NEXT: br i1 [[TMP37]], label [[DOTOMP_LASTPRIVATE_THEN_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK4: .omp.lastprivate.then.i: -// CHECK4-NEXT: [[TMP38:%.*]] = load double, ptr [[TMP25]], align 8 +// CHECK4-NEXT: [[TMP38:%.*]] = load double, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: store volatile double [[TMP38]], ptr [[TMP22]], align 8 -// CHECK4-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP26]], align 4 +// CHECK4-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP26]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: store i32 [[TMP39]], ptr [[TMP24]], align 4 // CHECK4-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK4: .omp_outlined..1.exit: @@ -1404,7 +1404,7 @@ // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4 +// CHECK4-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 8 // CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK4-NEXT: ret void @@ -1419,7 +1419,7 @@ // CHECK5-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK5-NEXT: store ptr [[S]], ptr [[S_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK5-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1:[0-9]+]], i32 3, ptr @.omp_outlined., i64 [[TMP1]], ptr [[A_ADDR]], ptr [[S_ADDR]]) // CHECK5-NEXT: ret void @@ -1440,11 +1440,11 @@ // CHECK5-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK5-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK5-NEXT: store ptr [[S]], ptr [[S_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[S_ADDR]], align 8 // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP4]]) // CHECK5-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0 // CHECK5-NEXT: br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -1459,7 +1459,7 @@ // CHECK5-NEXT: [[TMP10:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP4]], i32 1, i64 96, i64 24, ptr @.omp_task_entry.) // CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP10]], i32 0, i32 0 // CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP11]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 +// CHECK5-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP13]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) // CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP10]], i32 0, i32 1 // CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP11]], i32 0, i32 5 @@ -1470,7 +1470,7 @@ // CHECK5-NEXT: store i64 1, ptr [[TMP17]], align 8 // CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP11]], i32 0, i32 9 // CHECK5-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP18]], i8 0, i64 8, i1 false) -// CHECK5-NEXT: [[TMP19:%.*]] = load i64, ptr [[TMP17]], align 8 +// CHECK5-NEXT: [[TMP19:%.*]] = load i64, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP4]], ptr [[TMP10]], i32 1, ptr [[TMP15]], ptr [[TMP16]], i64 [[TMP19]], i32 1, i32 0, i64 0, ptr @.omp_task_dup.) 
// CHECK5-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP4]]) // CHECK5-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP4]]) @@ -1520,75 +1520,75 @@ // CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK5-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK5-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK5-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK5-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK5-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK5-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK5-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, 
!noalias !14 -// CHECK5-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK5-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: [[TMP20:%.*]] = load i64, ptr [[TMP19]], align 8 -// CHECK5-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 +// CHECK5-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK5-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK5-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: [[TMP20:%.*]] = load i64, ptr [[TMP19]], align 8, !noundef [[NOUNDEF3]] +// CHECK5-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 // CHECK5-NEXT: call void [[TMP21]](ptr [[TMP22]], ptr [[DOTLASTPRIV_PTR_ADDR_I]], ptr [[DOTLASTPRIV_PTR_ADDR1_I]]) #[[ATTR2:[0-9]+]] // CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP19]], i32 0, i32 1 // CHECK5-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 // CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP19]], i32 0, i32 2 // CHECK5-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8 -// CHECK5-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !14 -// CHECK5-NEXT: [[TMP28:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !14 -// CHECK5-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK5-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR_I]], align 8, !noalias !15 +// CHECK5-NEXT: [[TMP28:%.*]] = load ptr, ptr [[DOTLASTPRIV_PTR_ADDR1_I]], align 8, !noalias !15 +// CHECK5-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef 
[[NOUNDEF3]] // CHECK5-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP29]] to i32 -// CHECK5-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK5-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK5: omp.inner.for.cond.i: -// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15:![0-9]+]] +// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[CONV2_I:%.*]] = sext i32 [[TMP30]] to i64 -// CHECK5-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK5-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP31]] // CHECK5-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK5: omp.inner.for.body.i: -// CHECK5-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK5-NEXT: store i32 [[TMP32]], ptr [[I_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK5-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK5-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] +// CHECK5-NEXT: store i32 [[TMP32]], ptr [[I_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK5-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[ADD3_I:%.*]] = add nsw i32 [[TMP33]], 1 -// CHECK5-NEXT: store i32 [[ADD3_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD3_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK5: omp.inner.for.end.i: -// CHECK5-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 +// CHECK5-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK5-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 // CHECK5-NEXT: br i1 [[TMP35]], label [[DOTOMP_LASTPRIVATE_THEN_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK5: .omp.lastprivate.then.i: -// CHECK5-NEXT: [[TMP36:%.*]] = load ptr, ptr [[TMP27]], align 8 +// CHECK5-NEXT: [[TMP36:%.*]] = load ptr, ptr [[TMP27]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: store ptr [[TMP36]], ptr [[TMP24]], align 8 -// CHECK5-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP28]], align 8 +// CHECK5-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP28]], align 8, !noundef [[NOUNDEF3]] // CHECK5-NEXT: store ptr [[TMP37]], ptr [[TMP26]], align 8 // CHECK5-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK5: .omp_outlined..1.exit: @@ -1607,7 +1607,7 @@ // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 8 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTADDR2]], align 4, !noundef [[NOUNDEF3]] // CHECK5-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 8 // CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK5-NEXT: ret void @@ -1638,7 +1638,7 @@ // CHECK6-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 // CHECK6-NEXT: [[TMP1:%.*]] = load ptr, ptr [[J_ADDR]], align 8 // CHECK6-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK6-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK6-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_master(ptr @[[GLOB1]], i32 [[TMP3]]) // CHECK6-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0 // CHECK6-NEXT: br i1 [[TMP5]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -1651,7 +1651,7 @@ // CHECK6-NEXT: [[TMP8:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i64 80, i64 16, ptr @.omp_task_entry.) // CHECK6-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP8]], i32 0, i32 0 // CHECK6-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP9]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK6-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK6-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP11]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK6-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP9]], i32 0, i32 5 // CHECK6-NEXT: store i64 0, ptr [[TMP12]], align 8 @@ -1661,7 +1661,7 @@ // CHECK6-NEXT: store i64 1, ptr [[TMP14]], align 8 // CHECK6-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP9]], i32 0, i32 9 // CHECK6-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP15]], i8 0, i64 8, i1 false) -// CHECK6-NEXT: [[TMP16:%.*]] = load i64, ptr [[TMP14]], align 8 +// CHECK6-NEXT: [[TMP16:%.*]] = load i64, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK6-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP3]], ptr [[TMP8]], i32 1, ptr [[TMP12]], ptr [[TMP13]], i64 [[TMP16]], i32 1, i32 0, i64 0, ptr null) // CHECK6-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP3]]) // CHECK6-NEXT: call void @__kmpc_end_master(ptr @[[GLOB1]], i32 [[TMP3]]) @@ -1693,80 +1693,80 @@ // CHECK6-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK6-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK6-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK6-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK6-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK6-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK6-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK6-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[TMP8:%.*]] = 
getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK6-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 +// CHECK6-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK6-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +// CHECK6-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK6-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8 +// CHECK6-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK6-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8 +// CHECK6-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK6-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK6-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK6-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK6-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK6-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 +// CHECK6-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] +// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK6-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK6-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK6-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], 
align 8, !noalias !15 +// CHECK6-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store i64 [[TMP9]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store i64 [[TMP11]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store i64 [[TMP13]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store i32 [[TMP15]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK6-NEXT: store ptr [[TMP17]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK6-NEXT: [[TMP18:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 // CHECK6-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8 -// CHECK6-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4 -// CHECK6-NEXT: store i32 [[TMP20]], ptr [[DOTLINEAR_START_I]], align 4, !noalias !14 +// CHECK6-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF3]] +// CHECK6-NEXT: store i32 [[TMP20]], ptr [[DOTLINEAR_START_I]], align 4, !noalias !15 // CHECK6-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP18]], i32 0, i32 1 // CHECK6-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP21]], align 8 -// CHECK6-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4 -// CHECK6-NEXT: store i32 [[TMP23]], ptr [[DOTLINEAR_START1_I]], align 4, !noalias !14 +// CHECK6-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4, !noundef [[NOUNDEF3]] +// CHECK6-NEXT: store i32 [[TMP23]], ptr [[DOTLINEAR_START1_I]], align 4, !noalias !15 // CHECK6-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP18]], align 8 -// CHECK6-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK6-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP25]] to i32 -// CHECK6-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK6-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK6: omp.inner.for.cond.i: -// CHECK6-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15:![0-9]+]] +// CHECK6-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[CONV3_I:%.*]] = sext i32 [[TMP26]] to i64 -// CHECK6-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK6-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV3_I]], [[TMP27]] // CHECK6-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[OMP_INNER_FOR_END_I:%.*]] // CHECK6: omp.inner.for.body.i: -// CHECK6-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK6-NEXT: store i32 [[TMP28]], ptr [[I_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK6-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTLINEAR_START1_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK6-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK6-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, 
!llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] +// CHECK6-NEXT: store i32 [[TMP28]], ptr [[I_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK6-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTLINEAR_START1_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] +// CHECK6-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[ADD5_I:%.*]] = add nsw i32 [[TMP29]], [[TMP30]] -// CHECK6-NEXT: store i32 [[ADD5_I]], ptr [[J_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK6-NEXT: [[TMP31:%.*]] = load i32, ptr [[J_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK6-NEXT: store i32 [[ADD5_I]], ptr [[J_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK6-NEXT: [[TMP31:%.*]] = load i32, ptr [[J_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[INC_I:%.*]] = add nsw i32 [[TMP31]], 1 -// CHECK6-NEXT: store i32 [[INC_I]], ptr [[J_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK6-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK6-NEXT: store i32 [[INC_I]], ptr [[J_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK6-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[ADD6_I:%.*]] = add nsw i32 [[TMP32]], 1 -// CHECK6-NEXT: store i32 [[ADD6_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK6-NEXT: store i32 [[ADD6_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK6: omp.inner.for.end.i: -// CHECK6-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 +// CHECK6-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK6-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK6-NEXT: br i1 [[TMP34]], label [[DOTOMP_LINEAR_PU_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK6: .omp.linear.pu.i: // CHECK6-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP18]], align 8 // CHECK6-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP18]], i32 0, i32 1 // CHECK6-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP36]], align 8 -// CHECK6-NEXT: [[TMP38:%.*]] = load i32, ptr [[J_I]], align 4, !noalias !14 +// CHECK6-NEXT: [[TMP38:%.*]] = load i32, ptr [[J_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK6-NEXT: store i32 [[TMP38]], ptr [[TMP37]], align 4 // CHECK6-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK6: .omp_outlined..1.exit: @@ -1805,7 +1805,7 @@ // CHECK7-NEXT: call void @_ZN1SIdEC1Ed(ptr noundef nonnull align 8 dereferenceable(8) [[VAR]], double noundef 3.000000e+00) // CHECK7-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK7-NEXT: store i64 9, ptr [[DOTOMP_UB]], align 8 -// CHECK7-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK7-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2:![0-9]+]] // CHECK7-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 // CHECK7-NEXT: store i32 [[CONV]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: call void @_ZN1SIdEC1Ev(ptr 
noundef nonnull align 8 dereferenceable(8) [[VAR1]]) @@ -1821,36 +1821,36 @@ // CHECK7: arrayctor.cont: // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CONV5:%.*]] = sext i32 [[TMP1]] to i64 -// CHECK7-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP2]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV5]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK7: omp.inner.for.cond.cleanup: // CHECK7-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC4]], i64 0, i64 0 -// CHECK7-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK7-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK7-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR3]], i64 0, i64 0 -// CHECK7-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[ARRAYIDX6]], ptr align 8 [[VAR1]], i64 8, i1 false), !llvm.access.group [[ACC_GRP2]] -// CHECK7-NEXT: store i32 33, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK7-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[ARRAYIDX6]], ptr align 8 [[VAR1]], i64 8, i1 false), !llvm.access.group [[ACC_GRP3]] +// CHECK7-NEXT: store i32 33, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK7-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 10, ptr [[I]], align 4 // CHECK7-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[VAR]], ptr 
align 8 [[VAR1]], i64 8, i1 false) -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[T_VAR2]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[T_VAR2]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP6]], ptr [[T_VAR]], align 4 // CHECK7-NEXT: [[ARRAY_BEGIN8:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR]], i32 0, i32 0 // CHECK7-NEXT: [[TMP7:%.*]] = getelementptr [[STRUCT_S]], ptr [[ARRAY_BEGIN8]], i64 2 @@ -1866,7 +1866,7 @@ // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE9]], label [[OMP_ARRAYCPY_BODY]] // CHECK7: omp.arraycpy.done9: // CHECK7-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VEC]], ptr align 4 [[VEC4]], i64 8, i1 false) -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP8]], ptr @_ZZ4mainE5sivar, align 4 // CHECK7-NEXT: [[ARRAY_BEGIN10:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[S_ARR3]], i32 0, i32 0 // CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_S]], ptr [[ARRAY_BEGIN10]], i64 2 @@ -1916,7 +1916,7 @@ // CHECK7-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK7-NEXT: store double [[A]], ptr [[A_ADDR]], align 8 // CHECK7-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK7-NEXT: [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8 +// CHECK7-NEXT: [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK7-NEXT: call void @_ZN1SIdEC2Ed(ptr noundef nonnull align 8 dereferenceable(8) [[THIS1]], double noundef [[TMP0]]) // CHECK7-NEXT: ret void // @@ -1961,7 +1961,7 @@ // CHECK7-NEXT: call void @_ZN1SIiEC1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[VAR]], i32 noundef 3) // CHECK7-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK7-NEXT: store i64 9, ptr [[DOTOMP_UB]], align 8 -// CHECK7-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK7-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 // CHECK7-NEXT: store i32 [[CONV]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR3]], i32 0, i32 0 @@ -1977,34 +1977,34 @@ // CHECK7-NEXT: call void @_ZN1SIiEC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[VAR4]]) // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]] +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CONV5:%.*]] = sext i32 [[TMP1]] to i64 -// CHECK7-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP6]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV5]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK7: omp.inner.for.cond.cleanup: // CHECK7-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // 
CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[T_VAR1]], align 128, !llvm.access.group [[ACC_GRP6]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[T_VAR1]], align 128, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[VEC2]], i64 0, i64 0 -// CHECK7-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK7-NEXT: store i32 [[TMP4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK7-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR3]], i64 0, i64 0 -// CHECK7-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX6]], ptr align 4 [[VAR4]], i64 4, i1 false), !llvm.access.group [[ACC_GRP6]] +// CHECK7-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[ARRAYIDX6]], ptr align 4 [[VAR4]], i64 4, i1 false), !llvm.access.group [[ACC_GRP7]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK7-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 10, ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[T_VAR1]], align 128 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[T_VAR1]], align 128, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP6]], ptr [[T_VAR]], align 128 // CHECK7-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VEC]], ptr align 4 [[VEC2]], i64 8, i1 false) // CHECK7-NEXT: [[ARRAY_BEGIN8:%.*]] = getelementptr inbounds [2 x %struct.S.0], ptr [[S_ARR]], i32 0, i32 0 @@ -2079,7 +2079,7 @@ // CHECK7-NEXT: store double [[A]], ptr [[A_ADDR]], align 8 // CHECK7-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK7-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8 +// CHECK7-NEXT: [[TMP0:%.*]] = load double, ptr [[A_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store double [[TMP0]], ptr [[F]], align 8 // CHECK7-NEXT: ret void // @@ -2102,7 +2102,7 @@ // CHECK7-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK7-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK7-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: call void @_ZN1SIiEC2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK7-NEXT: ret void // @@ -2137,7 +2137,7 @@ // 
CHECK7-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK7-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK7-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[F]], align 4 // CHECK7-NEXT: ret void // @@ -2188,55 +2188,55 @@ // CHECK10-NEXT: store ptr [[DOTBLOCK_DESCRIPTOR]], ptr [[BLOCK_ADDR]], align 8 // CHECK10-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK10-NEXT: store i64 9, ptr [[DOTOMP_UB]], align 8 -// CHECK10-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK10-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2:![0-9]+]] // CHECK10-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 // CHECK10-NEXT: store i32 [[CONV]], ptr [[DOTOMP_IV]], align 4 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK10: omp.inner.for.cond: -// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] +// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK10-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64 -// CHECK10-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP2]] +// CHECK10-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK10-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV1]], [[TMP2]] // CHECK10-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK10: omp.inner.for.body: -// CHECK10-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK10-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK10-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK10-NEXT: store double 1.000000e+00, ptr [[G]], align 8, !llvm.access.group [[ACC_GRP2]] -// CHECK10-NEXT: store i32 11, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK10-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK10-NEXT: store double 1.000000e+00, ptr [[G]], align 8, !llvm.access.group [[ACC_GRP3]] +// CHECK10-NEXT: store i32 11, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK10-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK]], i32 0, i32 0 -// CHECK10-NEXT: store ptr @_NSConcreteStackBlock, ptr [[BLOCK_ISA]], align 8, !llvm.access.group [[ACC_GRP2]] +// CHECK10-NEXT: store ptr @_NSConcreteStackBlock, ptr [[BLOCK_ISA]], align 8, !llvm.access.group [[ACC_GRP3]] // CHECK10-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK]], i32 0, i32 1 -// CHECK10-NEXT: store i32 1073741824, ptr [[BLOCK_FLAGS]], align 8, !llvm.access.group [[ACC_GRP2]] +// CHECK10-NEXT: store i32 1073741824, ptr [[BLOCK_FLAGS]], align 8, !llvm.access.group [[ACC_GRP3]] // CHECK10-NEXT: [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK]], i32 0, i32 2 -// CHECK10-NEXT: store i32 0, ptr [[BLOCK_RESERVED]], 
align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK10-NEXT: store i32 0, ptr [[BLOCK_RESERVED]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK10-NEXT: [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK]], i32 0, i32 3 -// CHECK10-NEXT: store ptr @__main_block_invoke_2, ptr [[BLOCK_INVOKE]], align 8, !llvm.access.group [[ACC_GRP2]] +// CHECK10-NEXT: store ptr @__main_block_invoke_2, ptr [[BLOCK_INVOKE]], align 8, !llvm.access.group [[ACC_GRP3]] // CHECK10-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK]], i32 0, i32 4 -// CHECK10-NEXT: store ptr @__block_descriptor_tmp.1, ptr [[BLOCK_DESCRIPTOR]], align 8, !llvm.access.group [[ACC_GRP2]] +// CHECK10-NEXT: store ptr @__block_descriptor_tmp.1, ptr [[BLOCK_DESCRIPTOR]], align 8, !llvm.access.group [[ACC_GRP3]] // CHECK10-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK]], i32 0, i32 5 -// CHECK10-NEXT: [[TMP4:%.*]] = load volatile double, ptr [[G]], align 8, !llvm.access.group [[ACC_GRP2]] -// CHECK10-NEXT: store volatile double [[TMP4]], ptr [[BLOCK_CAPTURED]], align 8, !llvm.access.group [[ACC_GRP2]] +// CHECK10-NEXT: [[TMP4:%.*]] = load volatile double, ptr [[G]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK10-NEXT: store volatile double [[TMP4]], ptr [[BLOCK_CAPTURED]], align 8, !llvm.access.group [[ACC_GRP3]] // CHECK10-NEXT: [[BLOCK_CAPTURED2:%.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, double, i32 }>, ptr [[BLOCK]], i32 0, i32 6 -// CHECK10-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK10-NEXT: store i32 [[TMP5]], ptr [[BLOCK_CAPTURED2]], align 8, !llvm.access.group [[ACC_GRP2]] +// CHECK10-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK10-NEXT: store i32 [[TMP5]], ptr [[BLOCK_CAPTURED2]], align 8, !llvm.access.group [[ACC_GRP3]] // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], ptr [[BLOCK]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !llvm.access.group [[ACC_GRP2]] -// CHECK10-NEXT: call void [[TMP7]](ptr noundef [[BLOCK]]), !llvm.access.group [[ACC_GRP2]] +// CHECK10-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !llvm.access.group [[ACC_GRP3]] +// CHECK10-NEXT: call void [[TMP7]](ptr noundef [[BLOCK]]), !llvm.access.group [[ACC_GRP3]] // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK10: omp.body.continue: // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: -// CHECK10-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK10-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK10-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK10-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK10: omp.inner.for.end: // CHECK10-NEXT: store i32 10, ptr [[I]], align 4 -// CHECK10-NEXT: [[TMP9:%.*]] = load double, ptr [[G]], align 8 +// CHECK10-NEXT: [[TMP9:%.*]] = load 
double, ptr [[G]], align 8, !noundef [[NOUNDEF2]] // CHECK10-NEXT: store volatile double [[TMP9]], ptr @g, align 8 -// CHECK10-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR]], align 4 +// CHECK10-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR]], align 4, !noundef [[NOUNDEF2]] // CHECK10-NEXT: store i32 [[TMP10]], ptr @_ZZ4mainE5sivar, align 4 // CHECK10-NEXT: ret void // @@ -2271,38 +2271,38 @@ // CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK11-NEXT: store ptr [[S]], ptr [[S_ADDR]], align 8 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK11-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK11-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK11-NEXT: store i64 9, ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK11-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP2]] to i32 // CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK11-NEXT: [[CONV3:%.*]] = sext i32 [[TMP3]] to i64 -// CHECK11-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP2]] +// CHECK11-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK11-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV3]], [[TMP4]] // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP5]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK11-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: store i32 10, ptr [[I]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[A1]], align 8 +// CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[A1]], align 8, !noundef [[NOUNDEF2]] // 
CHECK11-NEXT: store ptr [[TMP7]], ptr [[A_ADDR]], align 8 -// CHECK11-NEXT: [[TMP8:%.*]] = load ptr, ptr [[S2]], align 8 +// CHECK11-NEXT: [[TMP8:%.*]] = load ptr, ptr [[S2]], align 8, !noundef [[NOUNDEF2]] // CHECK11-NEXT: store ptr [[TMP8]], ptr [[S_ADDR]], align 8 // CHECK11-NEXT: ret void // @@ -2323,44 +2323,44 @@ // CHECK12-NEXT: [[J4:%.*]] = alloca i32, align 4 // CHECK12-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK12-NEXT: store i64 9, ptr [[DOTOMP_UB]], align 8 -// CHECK12-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK12-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2:![0-9]+]] // CHECK12-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 // CHECK12-NEXT: store i32 [[CONV]], ptr [[DOTOMP_IV]], align 4 -// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[I]], align 4 +// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK12-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START]], align 4 -// CHECK12-NEXT: [[TMP2:%.*]] = load i32, ptr [[J]], align 4 +// CHECK12-NEXT: [[TMP2:%.*]] = load i32, ptr [[J]], align 4, !noundef [[NOUNDEF2]] // CHECK12-NEXT: store i32 [[TMP2]], ptr [[DOTLINEAR_START1]], align 4 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK12: omp.inner.for.cond: -// CHECK12-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] +// CHECK12-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK12-NEXT: [[CONV5:%.*]] = sext i32 [[TMP3]] to i64 -// CHECK12-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP2]] +// CHECK12-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK12-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV5]], [[TMP4]] // CHECK12-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK12: omp.inner.for.body: -// CHECK12-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK12-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP5]], 1 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK12-NEXT: store i32 [[ADD]], ptr [[I2]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK12-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK12-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK12-NEXT: store i32 [[ADD]], ptr [[I2]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK12-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK12-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK12-NEXT: [[MUL6:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK12-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP6]], [[MUL6]] -// CHECK12-NEXT: store i32 [[ADD7]], ptr [[J4]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK12-NEXT: [[TMP8:%.*]] = load i32, ptr [[J4]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK12-NEXT: store i32 [[ADD7]], ptr [[J4]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK12-NEXT: [[TMP8:%.*]] = load i32, ptr [[J4]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK12-NEXT: 
[[INC:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK12-NEXT: store i32 [[INC]], ptr [[J4]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK12-NEXT: store i32 [[INC]], ptr [[J4]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK12: omp.body.continue: // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: -// CHECK12-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK12-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK12-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK12-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK12-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK12: omp.inner.for.end: // CHECK12-NEXT: store i32 10, ptr [[I]], align 4 -// CHECK12-NEXT: [[TMP10:%.*]] = load i32, ptr [[J4]], align 4 +// CHECK12-NEXT: [[TMP10:%.*]] = load i32, ptr [[J4]], align 4, !noundef [[NOUNDEF2]] // CHECK12-NEXT: store i32 [[TMP10]], ptr [[J]], align 4 // CHECK12-NEXT: ret void // diff --git a/clang/test/OpenMP/parallel_reduction_task_codegen.cpp b/clang/test/OpenMP/parallel_reduction_task_codegen.cpp --- a/clang/test/OpenMP/parallel_reduction_task_codegen.cpp +++ b/clang/test/OpenMP/parallel_reduction_task_codegen.cpp @@ -42,7 +42,7 @@ // CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1:[0-9]+]], i32 2, ptr @.omp_outlined., ptr [[ARGC_ADDR]], ptr [[TMP0]]) // CHECK1-NEXT: ret i32 0 // @@ -71,14 +71,14 @@ // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP1]], i64 0 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 0 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64 // CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP4]] -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds ptr, ptr [[TMP5]], i64 9 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[ARRAYIDX3]], align 8 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[LB_ADD_LEN]] @@ -114,155 +114,155 @@ // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 0 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 -// CHECK1-NEXT: store i64 4, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 -// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP30]], i8 0, i64 4, i1 false) +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 +// CHECK1-NEXT: store i64 4, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr 
[[DOTRD_INPUT_GEP_]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 +// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 4, i1 false) // CHECK1-NEXT: [[DOTRD_INPUT_GEP_6:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds ptr, ptr [[TMP33]], i64 0 -// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARRAYIDX7]], align 8 -// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP34]], i64 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP36]] -// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr [[TMP37]], i64 9 -// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[ARRAYIDX10]], align 8 -// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[TMP38]], i64 [[LB_ADD_LEN9]] -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[ARRAYIDX11]] to i64 -// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[ARRAYIDX8]] to i64 -// CHECK1-NEXT: [[TMP42:%.*]] = sub i64 [[TMP40]], [[TMP41]] -// CHECK1-NEXT: [[TMP43:%.*]] = sdiv exact i64 [[TMP42]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP44:%.*]] = add nuw i64 [[TMP43]], 1 -// CHECK1-NEXT: [[TMP45:%.*]] = mul nuw i64 [[TMP44]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 2 -// CHECK1-NEXT: store i64 [[TMP45]], ptr [[TMP46]], align 8 -// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP47]], align 8 -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP48]], align 8 -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP49]], align 8 -// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 6 -// CHECK1-NEXT: store i32 1, ptr [[TMP50]], align 8 -// CHECK1-NEXT: [[TMP51:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[TMP51]], align 4 -// CHECK1-NEXT: [[TMP54:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP52]], i32 0, i32 2, ptr 
[[DOTRD_INPUT_]]) -// CHECK1-NEXT: store ptr [[TMP54]], ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP55]], align 8 -// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP56]], align 8 -// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP58:%.*]] = load ptr, ptr [[TMP]], align 8 -// CHECK1-NEXT: store ptr [[TMP58]], ptr [[TMP57]], align 8 -// CHECK1-NEXT: [[TMP59:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP60:%.*]] = load i32, ptr [[TMP59]], align 4 -// CHECK1-NEXT: [[TMP61:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP60]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) -// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP61]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP63]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP65:%.*]] = load ptr, ptr [[TMP64]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP65]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) -// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP61]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP67]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP69:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: store ptr [[TMP69]], ptr [[TMP68]], align 8 -// CHECK1-NEXT: [[TMP70:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP71:%.*]] = load i32, ptr [[TMP70]], align 4 -// CHECK1-NEXT: [[TMP72:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP71]], ptr [[TMP61]]) +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds ptr, ptr [[TMP30]], i64 0 +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[ARRAYIDX7]], align 8 +// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP31]], i64 0 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP33:%.*]] = sext i32 [[TMP32]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP33]] +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr [[TMP34]], i64 9 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[ARRAYIDX10]], align 8 +// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[TMP35]], i64 [[LB_ADD_LEN9]] +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP37:%.*]] = ptrtoint ptr [[ARRAYIDX11]] to i64 +// CHECK1-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[ARRAYIDX8]] to i64 +// CHECK1-NEXT: [[TMP39:%.*]] = sub i64 [[TMP37]], [[TMP38]] +// CHECK1-NEXT: [[TMP40:%.*]] = sdiv exact i64 [[TMP39]], ptrtoint (ptr 
getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP41:%.*]] = add nuw i64 [[TMP40]], 1 +// CHECK1-NEXT: [[TMP42:%.*]] = mul nuw i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 2 +// CHECK1-NEXT: store i64 [[TMP42]], ptr [[TMP43]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP44]], align 8 +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 6 +// CHECK1-NEXT: store i32 1, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP48]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP50:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP49]], i32 0, i32 2, ptr [[DOTRD_INPUT_]]) +// CHECK1-NEXT: store ptr [[TMP50]], ptr [[DOTTASK_RED_]], align 8 +// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP51]], align 8 +// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP52]], align 8 +// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP54:%.*]] = load ptr, ptr [[TMP]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP54]], ptr [[TMP53]], align 8 +// CHECK1-NEXT: [[TMP55:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[TMP55]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP57:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP56]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
+// CHECK1-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP57]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP58]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP60:%.*]] = load ptr, ptr [[TMP59]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP60]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) +// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP57]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP61]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP63:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP63]], ptr [[TMP62]], align 8 +// CHECK1-NEXT: [[TMP64:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP65:%.*]] = load i32, ptr [[TMP64]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP66:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP65]], ptr [[TMP57]]) +// CHECK1-NEXT: [[TMP67:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP68:%.*]] = load i32, ptr [[TMP67]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP68]], i32 0) +// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP69]], align 8 +// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP70]], align 8 +// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 +// CHECK1-NEXT: [[TMP72:%.*]] = inttoptr i64 [[TMP11]] to ptr +// CHECK1-NEXT: store ptr [[TMP72]], ptr [[TMP71]], align 8 // CHECK1-NEXT: [[TMP73:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP74:%.*]] = load i32, ptr [[TMP73]], align 4 -// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP74]], i32 0) -// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP75]], align 8 -// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP77]], align 8 -// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP79:%.*]] = inttoptr i64 [[TMP11]] to ptr -// CHECK1-NEXT: store ptr [[TMP79]], ptr [[TMP78]], align 8 -// CHECK1-NEXT: [[TMP80:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP80]], align 4 -// CHECK1-NEXT: [[TMP83:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB2:[0-9]+]], i32 [[TMP81]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP83]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP74:%.*]] = load i32, ptr [[TMP73]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP75:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB2:[0-9]+]], i32 [[TMP74]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr 
@.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP75]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP84:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP84]], [[TMP85]] +// CHECK1-NEXT: [[TMP76:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP77:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP76]], [[TMP77]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP86:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP86]] +// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP78]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE18:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST12:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT16:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP87:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP87]] to i32 -// CHECK1-NEXT: [[TMP88:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP88]] to i32 +// CHECK1-NEXT: [[TMP79:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP79]] to i32 +// CHECK1-NEXT: [[TMP80:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP80]] to i32 // CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV]], [[CONV13]] // CHECK1-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 // CHECK1-NEXT: store i8 [[CONV15]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT16]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE17:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT16]], [[TMP86]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE17:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT16]], [[TMP78]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_DONE18]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done18: -// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB2]], i32 [[TMP81]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB2]], i32 [[TMP74]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP89:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP90:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP89]] monotonic, align 4 -// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr i8, 
ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY19:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP91]] +// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP82:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP81]] monotonic, align 4 +// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY19:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP83]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY19]], label [[OMP_ARRAYCPY_DONE32:%.*]], label [[OMP_ARRAYCPY_BODY20:%.*]] // CHECK1: omp.arraycpy.body20: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST21:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT30:%.*]], [[ATOMIC_EXIT:%.*]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST22:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT29:%.*]], [[ATOMIC_EXIT]] ] -// CHECK1-NEXT: [[TMP92:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1 -// CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP92]] to i32 +// CHECK1-NEXT: [[TMP84:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP84]] to i32 // CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]] monotonic, align 1 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] // CHECK1: atomic_cont: -// CHECK1-NEXT: [[TMP93:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY20]] ], [ [[TMP98:%.*]], [[ATOMIC_CONT]] ] -// CHECK1-NEXT: store i8 [[TMP93]], ptr [[_TMP24]], align 1 -// CHECK1-NEXT: [[TMP94:%.*]] = load i8, ptr [[_TMP24]], align 1 -// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP94]] to i32 -// CHECK1-NEXT: [[TMP95:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1 -// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP95]] to i32 +// CHECK1-NEXT: [[TMP85:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY20]] ], [ [[TMP90:%.*]], [[ATOMIC_CONT]] ] +// CHECK1-NEXT: store i8 [[TMP85]], ptr [[_TMP24]], align 1 +// CHECK1-NEXT: [[TMP86:%.*]] = load i8, ptr [[_TMP24]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP86]] to i32 +// CHECK1-NEXT: [[TMP87:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP87]] to i32 // CHECK1-NEXT: [[ADD27:%.*]] = add nsw i32 [[CONV25]], [[CONV26]] // CHECK1-NEXT: [[CONV28:%.*]] = trunc i32 [[ADD27]] to i8 // CHECK1-NEXT: store i8 [[CONV28]], ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP96:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP97:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i8 [[TMP93]], i8 [[TMP96]] monotonic monotonic, align 1 -// CHECK1-NEXT: [[TMP98]] = extractvalue { i8, i1 } [[TMP97]], 0 -// CHECK1-NEXT: [[TMP99:%.*]] = extractvalue { i8, i1 } [[TMP97]], 1 -// CHECK1-NEXT: br i1 [[TMP99]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] +// CHECK1-NEXT: [[TMP88:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 +// CHECK1-NEXT: [[TMP89:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i8 [[TMP85]], i8 [[TMP88]] monotonic monotonic, align 1 +// CHECK1-NEXT: [[TMP90]] = extractvalue { i8, i1 } [[TMP89]], 0 +// CHECK1-NEXT: [[TMP91:%.*]] = extractvalue { i8, i1 } [[TMP89]], 1 +// CHECK1-NEXT: br i1 [[TMP91]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] // CHECK1: atomic_exit: // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT29]] = getelementptr i8, ptr 
[[OMP_ARRAYCPY_DESTELEMENTPAST22]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT30]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE31:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT29]], [[TMP91]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE31:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT29]], [[TMP83]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE31]], label [[OMP_ARRAYCPY_DONE32]], label [[OMP_ARRAYCPY_BODY20]] // CHECK1: omp.arraycpy.done32: // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: -// CHECK1-NEXT: [[TMP100:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP100]]) +// CHECK1-NEXT: [[TMP92:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP92]]) // CHECK1-NEXT: ret void // // @@ -273,8 +273,8 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -285,12 +285,12 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP5]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -303,7 +303,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP4]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP2]], [[TMP5]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -325,7 +325,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: 
[[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]] @@ -334,9 +334,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP8]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8 @@ -379,65 +379,65 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: call void [[TMP13]](ptr [[TMP14]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]] -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: 
[[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: [[TMP22:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP19]], ptr [[TMP18]]) -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]] -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP31]], i64 9 -// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 -// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i64 [[LB_ADD_LEN_I]] -// CHECK1-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 -// CHECK1-NEXT: [[TMP34:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP35:%.*]] = sub i64 [[TMP33]], [[TMP34]] -// CHECK1-NEXT: [[TMP36:%.*]] = sdiv exact i64 [[TMP35]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP37:%.*]] = add nuw i64 [[TMP36]], 1 -// CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: store i64 [[TMP37]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !12 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP39]], ptr [[TMP25]]) -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 8 -// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[TMP43]] to i64 -// CHECK1-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP46:%.*]] = sub i64 [[TMP44]], [[TMP45]] -// CHECK1-NEXT: [[TMP47:%.*]] = sdiv exact i64 [[TMP46]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr i8, ptr [[TMP40]], i64 [[TMP47]] -// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP48]], ptr [[TMP4_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias 
!13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP15]], ptr [[TMP14]]) +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP24:%.*]] = sext i32 [[TMP23]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP24]] +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP26]], i64 9 +// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 +// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP27]], i64 [[LB_ADD_LEN_I]] +// CHECK1-NEXT: [[TMP28:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 +// CHECK1-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP30:%.*]] = sub i64 [[TMP28]], [[TMP29]] +// CHECK1-NEXT: [[TMP31:%.*]] = sdiv exact i64 [[TMP30]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP32:%.*]] = add nuw i64 [[TMP31]], 1 +// CHECK1-NEXT: [[TMP33:%.*]] = mul nuw i64 [[TMP32]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: store i64 [[TMP32]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !13 +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP34]], ptr [[TMP20]]) +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP37]], align 8 +// CHECK1-NEXT: 
[[TMP39:%.*]] = ptrtoint ptr [[TMP38]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP41:%.*]] = sub i64 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: [[TMP42:%.*]] = sdiv exact i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP35]], i64 [[TMP42]] +// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP43]], ptr [[TMP4_I]], align 8, !noalias !13 // CHECK1-NEXT: ret i32 0 // // @@ -449,38 +449,38 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 // CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP15]], i64 [[TMP18]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP15]], [[TMP21]] +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[TMP14]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP11]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE5:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: -// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = 
phi ptr [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP15]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP23]] to i32 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP18]] to i32 +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP19]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i8 // CHECK1-NEXT: store i8 [[CONV4]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done5: // CHECK1-NEXT: ret void diff --git a/clang/test/OpenMP/parallel_sections_reduction_task_codegen.cpp b/clang/test/OpenMP/parallel_sections_reduction_task_codegen.cpp --- a/clang/test/OpenMP/parallel_sections_reduction_task_codegen.cpp +++ b/clang/test/OpenMP/parallel_sections_reduction_task_codegen.cpp @@ -42,7 +42,7 @@ // CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1:[0-9]+]], i32 2, ptr @.omp_outlined., ptr [[ARGC_ADDR]], ptr [[TMP0]]) // CHECK1-NEXT: ret i32 0 // @@ -80,14 +80,14 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4 // CHECK1-NEXT: store i32 0, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP1]], i64 0 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 0 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64 // CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP4]] -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds ptr, ptr [[TMP5]], i64 9 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[ARRAYIDX3]], align 8 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[LB_ADD_LEN]] @@ -123,188 +123,188 @@ // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 0 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 -// CHECK1-NEXT: store i64 4, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 -// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP30]], i8 0, i64 4, i1 false) +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 +// CHECK1-NEXT: store i64 4, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], 
ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 +// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 4, i1 false) // CHECK1-NEXT: [[DOTRD_INPUT_GEP_6:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds ptr, ptr [[TMP33]], i64 0 -// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARRAYIDX7]], align 8 -// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP34]], i64 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP36]] -// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr [[TMP37]], i64 9 -// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[ARRAYIDX10]], align 8 -// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[TMP38]], i64 [[LB_ADD_LEN9]] -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[ARRAYIDX11]] to i64 -// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[ARRAYIDX8]] to i64 -// CHECK1-NEXT: [[TMP42:%.*]] = sub i64 [[TMP40]], [[TMP41]] -// CHECK1-NEXT: [[TMP43:%.*]] = sdiv exact i64 [[TMP42]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP44:%.*]] = add nuw i64 [[TMP43]], 1 -// CHECK1-NEXT: [[TMP45:%.*]] = mul nuw i64 [[TMP44]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 2 -// CHECK1-NEXT: store i64 [[TMP45]], ptr [[TMP46]], align 8 -// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP47]], align 8 -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP48]], align 8 -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP49]], align 8 -// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 6 -// CHECK1-NEXT: store i32 1, ptr [[TMP50]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr 
inbounds ptr, ptr [[TMP30]], i64 0 +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[ARRAYIDX7]], align 8 +// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP31]], i64 0 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP33:%.*]] = sext i32 [[TMP32]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP33]] +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr [[TMP34]], i64 9 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[ARRAYIDX10]], align 8 +// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[TMP35]], i64 [[LB_ADD_LEN9]] +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP37:%.*]] = ptrtoint ptr [[ARRAYIDX11]] to i64 +// CHECK1-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[ARRAYIDX8]] to i64 +// CHECK1-NEXT: [[TMP39:%.*]] = sub i64 [[TMP37]], [[TMP38]] +// CHECK1-NEXT: [[TMP40:%.*]] = sdiv exact i64 [[TMP39]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP41:%.*]] = add nuw i64 [[TMP40]], 1 +// CHECK1-NEXT: [[TMP42:%.*]] = mul nuw i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 2 +// CHECK1-NEXT: store i64 [[TMP42]], ptr [[TMP43]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP44]], align 8 +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 6 +// CHECK1-NEXT: store i32 1, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP48]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP50:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP49]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) +// CHECK1-NEXT: store ptr [[TMP50]], ptr [[DOTTASK_RED_]], align 8 // CHECK1-NEXT: [[TMP51:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[TMP51]], align 4 -// CHECK1-NEXT: [[TMP54:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP52]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) -// CHECK1-NEXT: store ptr [[TMP54]], ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: [[TMP55:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[TMP55]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP56]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) -// CHECK1-NEXT: 
[[TMP57:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 -// CHECK1-NEXT: [[TMP58:%.*]] = icmp slt i32 [[TMP57]], 0 -// CHECK1-NEXT: [[TMP59:%.*]] = select i1 [[TMP58]], i32 [[TMP57]], i32 0 -// CHECK1-NEXT: store i32 [[TMP59]], ptr [[DOTOMP_SECTIONS_UB_]], align 4 -// CHECK1-NEXT: [[TMP60:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4 -// CHECK1-NEXT: store i32 [[TMP60]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[TMP51]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP52]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP54:%.*]] = icmp slt i32 [[TMP53]], 0 +// CHECK1-NEXT: [[TMP55:%.*]] = select i1 [[TMP54]], i32 [[TMP53]], i32 0 +// CHECK1-NEXT: store i32 [[TMP55]], ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP56]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP61:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 -// CHECK1-NEXT: [[TMP62:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP61]], [[TMP62]] +// CHECK1-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP57]], [[TMP58]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP63:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 -// CHECK1-NEXT: switch i32 [[TMP63]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ +// CHECK1-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: switch i32 [[TMP59]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.sections.case: -// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP64]], align 8 -// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP65]], align 8 -// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP67:%.*]] = load ptr, ptr [[TMP]], align 8 -// CHECK1-NEXT: store ptr [[TMP67]], ptr [[TMP66]], align 8 -// CHECK1-NEXT: [[TMP68:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP69:%.*]] = load i32, ptr [[TMP68]], align 4 -// CHECK1-NEXT: [[TMP70:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP69]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
-// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP70]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP72]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP74:%.*]] = load ptr, ptr [[TMP73]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP74]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) -// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP70]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP76]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP78:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: store ptr [[TMP78]], ptr [[TMP77]], align 8 -// CHECK1-NEXT: [[TMP79:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP80:%.*]] = load i32, ptr [[TMP79]], align 4 -// CHECK1-NEXT: [[TMP81:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP80]], ptr [[TMP70]]) +// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP60]], align 8 +// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP61]], align 8 +// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP63:%.*]] = load ptr, ptr [[TMP]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP63]], ptr [[TMP62]], align 8 +// CHECK1-NEXT: [[TMP64:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP65:%.*]] = load i32, ptr [[TMP64]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP66:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP65]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
+// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP66]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP67]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP69:%.*]] = load ptr, ptr [[TMP68]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP69]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) +// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP66]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP70]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP72:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP72]], ptr [[TMP71]], align 8 +// CHECK1-NEXT: [[TMP73:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP74:%.*]] = load i32, ptr [[TMP73]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP75:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP74]], ptr [[TMP66]]) // CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]] // CHECK1: .omp.sections.exit: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP82:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 -// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP82]], 1 +// CHECK1-NEXT: [[TMP76:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP76]], 1 // CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: -// CHECK1-NEXT: [[TMP83:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP84:%.*]] = load i32, ptr [[TMP83]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP84]]) +// CHECK1-NEXT: [[TMP77:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP78:%.*]] = load i32, ptr [[TMP77]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP78]]) +// CHECK1-NEXT: [[TMP79:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP80:%.*]] = load i32, ptr [[TMP79]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP80]], i32 1) +// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP81]], align 8 +// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP82]], align 8 +// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 +// CHECK1-NEXT: [[TMP84:%.*]] = inttoptr i64 [[TMP11]] to ptr +// CHECK1-NEXT: store ptr [[TMP84]], ptr [[TMP83]], align 8 // CHECK1-NEXT: [[TMP85:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP86:%.*]] = load i32, ptr [[TMP85]], align 4 -// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP86]], i32 1) -// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP87]], align 8 -// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [3 x ptr], ptr 
[[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP89]], align 8 -// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP91:%.*]] = inttoptr i64 [[TMP11]] to ptr -// CHECK1-NEXT: store ptr [[TMP91]], ptr [[TMP90]], align 8 -// CHECK1-NEXT: [[TMP92:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP93:%.*]] = load i32, ptr [[TMP92]], align 4 -// CHECK1-NEXT: [[TMP95:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP93]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP95]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP86:%.*]] = load i32, ptr [[TMP85]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP87:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP86]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP87]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP96:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP97:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP96]], [[TMP97]] +// CHECK1-NEXT: [[TMP88:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP89:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP88]], [[TMP89]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP98:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP98]] +// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP90]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE18:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST12:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT16:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP99:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP99]] to i32 -// CHECK1-NEXT: [[TMP100:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP100]] to i32 +// CHECK1-NEXT: [[TMP91:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP91]] to i32 +// CHECK1-NEXT: [[TMP92:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP92]] to i32 // CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV]], [[CONV13]] // CHECK1-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 // CHECK1-NEXT: store i8 [[CONV15]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1 // CHECK1-NEXT: 
[[OMP_ARRAYCPY_DEST_ELEMENT16]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE17:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT16]], [[TMP98]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE17:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT16]], [[TMP90]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_DONE18]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done18: -// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP93]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP86]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP101:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP102:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP101]] monotonic, align 4 -// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY19:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP103]] +// CHECK1-NEXT: [[TMP93:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP94:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP93]] monotonic, align 4 +// CHECK1-NEXT: [[TMP95:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY19:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP95]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY19]], label [[OMP_ARRAYCPY_DONE32:%.*]], label [[OMP_ARRAYCPY_BODY20:%.*]] // CHECK1: omp.arraycpy.body20: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST21:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT30:%.*]], [[ATOMIC_EXIT:%.*]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST22:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT29:%.*]], [[ATOMIC_EXIT]] ] -// CHECK1-NEXT: [[TMP104:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1 -// CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP104]] to i32 +// CHECK1-NEXT: [[TMP96:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP96]] to i32 // CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]] monotonic, align 1 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] // CHECK1: atomic_cont: -// CHECK1-NEXT: [[TMP105:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY20]] ], [ [[TMP110:%.*]], [[ATOMIC_CONT]] ] -// CHECK1-NEXT: store i8 [[TMP105]], ptr [[_TMP24]], align 1 -// CHECK1-NEXT: [[TMP106:%.*]] = load i8, ptr [[_TMP24]], align 1 -// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP106]] to i32 -// CHECK1-NEXT: [[TMP107:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1 -// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP107]] to i32 +// CHECK1-NEXT: [[TMP97:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY20]] ], [ [[TMP102:%.*]], [[ATOMIC_CONT]] ] +// CHECK1-NEXT: store i8 [[TMP97]], ptr [[_TMP24]], align 1 +// CHECK1-NEXT: [[TMP98:%.*]] = load i8, ptr [[_TMP24]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP98]] to i32 +// CHECK1-NEXT: [[TMP99:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP99]] to i32 // CHECK1-NEXT: 
[[ADD27:%.*]] = add nsw i32 [[CONV25]], [[CONV26]] // CHECK1-NEXT: [[CONV28:%.*]] = trunc i32 [[ADD27]] to i8 // CHECK1-NEXT: store i8 [[CONV28]], ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP108:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP109:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i8 [[TMP105]], i8 [[TMP108]] monotonic monotonic, align 1 -// CHECK1-NEXT: [[TMP110]] = extractvalue { i8, i1 } [[TMP109]], 0 -// CHECK1-NEXT: [[TMP111:%.*]] = extractvalue { i8, i1 } [[TMP109]], 1 -// CHECK1-NEXT: br i1 [[TMP111]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] +// CHECK1-NEXT: [[TMP100:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 +// CHECK1-NEXT: [[TMP101:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i8 [[TMP97]], i8 [[TMP100]] monotonic monotonic, align 1 +// CHECK1-NEXT: [[TMP102]] = extractvalue { i8, i1 } [[TMP101]], 0 +// CHECK1-NEXT: [[TMP103:%.*]] = extractvalue { i8, i1 } [[TMP101]], 1 +// CHECK1-NEXT: br i1 [[TMP103]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] // CHECK1: atomic_exit: // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT29]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT30]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE31:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT29]], [[TMP103]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE31:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT29]], [[TMP95]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE31]], label [[OMP_ARRAYCPY_DONE32]], label [[OMP_ARRAYCPY_BODY20]] // CHECK1: omp.arraycpy.done32: // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: -// CHECK1-NEXT: [[TMP112:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP112]]) +// CHECK1-NEXT: [[TMP104:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP104]]) // CHECK1-NEXT: ret void // // @@ -315,8 +315,8 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -327,12 +327,12 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP5]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -345,7 +345,7 @@ 
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP4]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP2]], [[TMP5]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -367,7 +367,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]] @@ -376,9 +376,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP8]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8 @@ -421,65 +421,65 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// 
CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: call void [[TMP13]](ptr [[TMP14]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]] -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: [[TMP22:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP19]], ptr [[TMP18]]) -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]] -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP31]], i64 9 -// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 -// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i64 [[LB_ADD_LEN_I]] -// CHECK1-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 -// CHECK1-NEXT: [[TMP34:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP35:%.*]] = sub i64 [[TMP33]], [[TMP34]] -// CHECK1-NEXT: [[TMP36:%.*]] = sdiv exact i64 [[TMP35]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP37:%.*]] = add nuw i64 [[TMP36]], 1 -// CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: store i64 [[TMP37]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !12 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP39]], ptr [[TMP25]]) -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 8 -// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[TMP43]] to i64 -// 
CHECK1-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP46:%.*]] = sub i64 [[TMP44]], [[TMP45]] -// CHECK1-NEXT: [[TMP47:%.*]] = sdiv exact i64 [[TMP46]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr i8, ptr [[TMP40]], i64 [[TMP47]] -// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP48]], ptr [[TMP4_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP15]], ptr [[TMP14]]) +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP24:%.*]] = sext i32 [[TMP23]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP24]] +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP26]], i64 9 +// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 +// 
CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP27]], i64 [[LB_ADD_LEN_I]] +// CHECK1-NEXT: [[TMP28:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 +// CHECK1-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP30:%.*]] = sub i64 [[TMP28]], [[TMP29]] +// CHECK1-NEXT: [[TMP31:%.*]] = sdiv exact i64 [[TMP30]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP32:%.*]] = add nuw i64 [[TMP31]], 1 +// CHECK1-NEXT: [[TMP33:%.*]] = mul nuw i64 [[TMP32]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: store i64 [[TMP32]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !13 +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP34]], ptr [[TMP20]]) +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP37]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[TMP38]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP41:%.*]] = sub i64 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: [[TMP42:%.*]] = sdiv exact i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP35]], i64 [[TMP42]] +// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP43]], ptr [[TMP4_I]], align 8, !noalias !13 // CHECK1-NEXT: ret i32 0 // // @@ -491,38 +491,38 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 // CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64 -// 
CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP15]], i64 [[TMP18]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP15]], [[TMP21]] +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[TMP14]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP11]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE5:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: -// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP15]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP23]] to i32 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP18]] to i32 +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP19]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i8 // CHECK1-NEXT: store i8 [[CONV4]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done5: // CHECK1-NEXT: ret void diff --git a/clang/test/OpenMP/sections_reduction_task_codegen.cpp b/clang/test/OpenMP/sections_reduction_task_codegen.cpp --- a/clang/test/OpenMP/sections_reduction_task_codegen.cpp +++ b/clang/test/OpenMP/sections_reduction_task_codegen.cpp @@ -81,14 +81,14 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_SECTIONS_ST_]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_SECTIONS_IL_]], align 4 // CHECK1-NEXT: store i32 0, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: 
[[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 0 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 0 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64 // CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP5]] -// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TMP1]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds ptr, ptr [[TMP6]], i64 9 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[ARRAYIDX3]], align 8 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i64 [[LB_ADD_LEN]] @@ -124,192 +124,192 @@ // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 0 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 -// CHECK1-NEXT: store i64 4, ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 -// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP31]], i8 0, i64 4, i1 false) +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 +// CHECK1-NEXT: store i64 4, ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 +// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP29]], i8 0, i64 4, i1 false) // CHECK1-NEXT: [[DOTRD_INPUT_GEP_6:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP1]], align 8 -// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds ptr, ptr [[TMP34]], i64 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[ARRAYIDX7]], align 8 -// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP35]], i64 0 -// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP37:%.*]] = sext i32 [[TMP36]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP37]] -// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP1]], align 8 -// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr [[TMP38]], i64 9 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[ARRAYIDX10]], align 8 -// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[TMP39]], i64 [[LB_ADD_LEN9]] -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP40]], align 8 -// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[ARRAYIDX11]] to i64 -// CHECK1-NEXT: [[TMP42:%.*]] = ptrtoint ptr [[ARRAYIDX8]] to i64 -// CHECK1-NEXT: [[TMP43:%.*]] = sub i64 [[TMP41]], [[TMP42]] -// CHECK1-NEXT: [[TMP44:%.*]] = sdiv exact i64 [[TMP43]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP45:%.*]] = add nuw i64 [[TMP44]], 1 -// CHECK1-NEXT: [[TMP46:%.*]] = mul nuw i64 [[TMP45]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 2 -// CHECK1-NEXT: store i64 [[TMP46]], ptr [[TMP47]], align 8 -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP48]], align 8 -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP49]], align 8 -// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP50]], align 8 -// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 6 -// CHECK1-NEXT: store i32 1, ptr [[TMP51]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds ptr, ptr [[TMP31]], i64 0 +// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARRAYIDX7]], align 8 +// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i64 0 +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP34:%.*]] = sext i32 [[TMP33]] to i64 +// CHECK1-NEXT: 
[[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP34]] +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP1]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr [[TMP35]], i64 9 +// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[ARRAYIDX10]], align 8 +// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[TMP36]], i64 [[LB_ADD_LEN9]] +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP30]], align 8 +// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP37]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[ARRAYIDX11]] to i64 +// CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[ARRAYIDX8]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = sub i64 [[TMP38]], [[TMP39]] +// CHECK1-NEXT: [[TMP41:%.*]] = sdiv exact i64 [[TMP40]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP42:%.*]] = add nuw i64 [[TMP41]], 1 +// CHECK1-NEXT: [[TMP43:%.*]] = mul nuw i64 [[TMP42]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 2 +// CHECK1-NEXT: store i64 [[TMP43]], ptr [[TMP44]], align 8 +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 6 +// CHECK1-NEXT: store i32 1, ptr [[TMP48]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP50:%.*]] = load i32, ptr [[TMP49]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP51:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP50]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) +// CHECK1-NEXT: store ptr [[TMP51]], ptr [[DOTTASK_RED_]], align 8 // CHECK1-NEXT: [[TMP52:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP53:%.*]] = load i32, ptr [[TMP52]], align 4 -// CHECK1-NEXT: [[TMP55:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP53]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) -// CHECK1-NEXT: store ptr [[TMP55]], ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: [[TMP56:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP57:%.*]] = load i32, ptr [[TMP56]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP57]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 -// CHECK1-NEXT: [[TMP59:%.*]] = icmp slt i32 [[TMP58]], 0 -// CHECK1-NEXT: [[TMP60:%.*]] = select i1 [[TMP59]], i32 [[TMP58]], i32 0 -// CHECK1-NEXT: store i32 [[TMP60]], ptr [[DOTOMP_SECTIONS_UB_]], align 4 -// CHECK1-NEXT: [[TMP61:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4 -// 
CHECK1-NEXT: store i32 [[TMP61]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 +// CHECK1-NEXT: [[TMP53:%.*]] = load i32, ptr [[TMP52]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP53]], i32 34, ptr [[DOTOMP_SECTIONS_IL_]], ptr [[DOTOMP_SECTIONS_LB_]], ptr [[DOTOMP_SECTIONS_UB_]], ptr [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) +// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP55:%.*]] = icmp slt i32 [[TMP54]], 0 +// CHECK1-NEXT: [[TMP56:%.*]] = select i1 [[TMP55]], i32 [[TMP54]], i32 0 +// CHECK1-NEXT: store i32 [[TMP56]], ptr [[DOTOMP_SECTIONS_UB_]], align 4 +// CHECK1-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_LB_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP57]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP62:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 -// CHECK1-NEXT: [[TMP63:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP62]], [[TMP63]] +// CHECK1-NEXT: [[TMP58:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_UB_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP58]], [[TMP59]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP64:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 -// CHECK1-NEXT: switch i32 [[TMP64]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ +// CHECK1-NEXT: [[TMP60:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: switch i32 [[TMP60]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.sections.case: -// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP65]], align 8 -// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP66]], align 8 -// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP68:%.*]] = load ptr, ptr [[TMP]], align 8 -// CHECK1-NEXT: store ptr [[TMP68]], ptr [[TMP67]], align 8 -// CHECK1-NEXT: [[TMP69:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP70:%.*]] = load i32, ptr [[TMP69]], align 4 -// CHECK1-NEXT: [[TMP71:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP70]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
-// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP71]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP73]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP75:%.*]] = load ptr, ptr [[TMP74]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP75]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) -// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP71]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP77]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP79:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: store ptr [[TMP79]], ptr [[TMP78]], align 8 -// CHECK1-NEXT: [[TMP80:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP80]], align 4 -// CHECK1-NEXT: [[TMP82:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP81]], ptr [[TMP71]]) +// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP61]], align 8 +// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP62]], align 8 +// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP64:%.*]] = load ptr, ptr [[TMP]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP64]], ptr [[TMP63]], align 8 +// CHECK1-NEXT: [[TMP65:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP66:%.*]] = load i32, ptr [[TMP65]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP67:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP66]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
+// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP67]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP68]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP70:%.*]] = load ptr, ptr [[TMP69]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP70]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) +// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP67]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP71]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP73:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP73]], ptr [[TMP72]], align 8 +// CHECK1-NEXT: [[TMP74:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP75:%.*]] = load i32, ptr [[TMP74]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP76:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP75]], ptr [[TMP67]]) // CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]] // CHECK1: .omp.sections.exit: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP83:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4 -// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP83]], 1 +// CHECK1-NEXT: [[TMP77:%.*]] = load i32, ptr [[DOTOMP_SECTIONS_IV_]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP77]], 1 // CHECK1-NEXT: store i32 [[INC]], ptr [[DOTOMP_SECTIONS_IV_]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: -// CHECK1-NEXT: [[TMP84:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[TMP84]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP85]]) +// CHECK1-NEXT: [[TMP78:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP79:%.*]] = load i32, ptr [[TMP78]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP79]]) +// CHECK1-NEXT: [[TMP80:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP80]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP81]], i32 1) +// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP82]], align 8 +// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP83]], align 8 +// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 +// CHECK1-NEXT: [[TMP85:%.*]] = inttoptr i64 [[TMP12]] to ptr +// CHECK1-NEXT: store ptr [[TMP85]], ptr [[TMP84]], align 8 // CHECK1-NEXT: [[TMP86:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[TMP86]], align 4 -// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP87]], i32 1) -// CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP88]], align 8 -// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [3 x ptr], ptr 
[[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP90]], align 8 -// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP92:%.*]] = inttoptr i64 [[TMP12]] to ptr -// CHECK1-NEXT: store ptr [[TMP92]], ptr [[TMP91]], align 8 -// CHECK1-NEXT: [[TMP93:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP94:%.*]] = load i32, ptr [[TMP93]], align 4 -// CHECK1-NEXT: [[TMP96:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB3:[0-9]+]], i32 [[TMP94]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP96]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[TMP86]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP88:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB3:[0-9]+]], i32 [[TMP87]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP88]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP97:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP98:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP97]], [[TMP98]] +// CHECK1-NEXT: [[TMP89:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP90:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP89]], [[TMP90]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP12]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP99]] +// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP12]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP91]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE18:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST12:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT16:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP100:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP100]] to i32 -// CHECK1-NEXT: [[TMP101:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP101]] to i32 +// CHECK1-NEXT: [[TMP92:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP92]] to i32 +// CHECK1-NEXT: [[TMP93:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP93]] to i32 // CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV]], [[CONV13]] // CHECK1-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 // CHECK1-NEXT: store i8 [[CONV15]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1 // CHECK1-NEXT: 
[[OMP_ARRAYCPY_DEST_ELEMENT16]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE17:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT16]], [[TMP99]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE17:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT16]], [[TMP91]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_DONE18]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done18: -// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB3]], i32 [[TMP94]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB3]], i32 [[TMP87]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP102:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP103:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP102]] monotonic, align 4 -// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP12]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY19:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP104]] +// CHECK1-NEXT: [[TMP94:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP95:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP94]] monotonic, align 4 +// CHECK1-NEXT: [[TMP96:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP12]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY19:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP96]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY19]], label [[OMP_ARRAYCPY_DONE32:%.*]], label [[OMP_ARRAYCPY_BODY20:%.*]] // CHECK1: omp.arraycpy.body20: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST21:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT30:%.*]], [[ATOMIC_EXIT:%.*]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST22:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT29:%.*]], [[ATOMIC_EXIT]] ] -// CHECK1-NEXT: [[TMP105:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1 -// CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP105]] to i32 +// CHECK1-NEXT: [[TMP97:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP97]] to i32 // CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]] monotonic, align 1 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] // CHECK1: atomic_cont: -// CHECK1-NEXT: [[TMP106:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY20]] ], [ [[TMP111:%.*]], [[ATOMIC_CONT]] ] -// CHECK1-NEXT: store i8 [[TMP106]], ptr [[_TMP24]], align 1 -// CHECK1-NEXT: [[TMP107:%.*]] = load i8, ptr [[_TMP24]], align 1 -// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP107]] to i32 -// CHECK1-NEXT: [[TMP108:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1 -// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP108]] to i32 +// CHECK1-NEXT: [[TMP98:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY20]] ], [ [[TMP103:%.*]], [[ATOMIC_CONT]] ] +// CHECK1-NEXT: store i8 [[TMP98]], ptr [[_TMP24]], align 1 +// CHECK1-NEXT: [[TMP99:%.*]] = load i8, ptr [[_TMP24]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP99]] to i32 +// CHECK1-NEXT: [[TMP100:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP100]] to i32 // CHECK1-NEXT: [[ADD27:%.*]] = add nsw 
i32 [[CONV25]], [[CONV26]] // CHECK1-NEXT: [[CONV28:%.*]] = trunc i32 [[ADD27]] to i8 // CHECK1-NEXT: store i8 [[CONV28]], ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP109:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP110:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i8 [[TMP106]], i8 [[TMP109]] monotonic monotonic, align 1 -// CHECK1-NEXT: [[TMP111]] = extractvalue { i8, i1 } [[TMP110]], 0 -// CHECK1-NEXT: [[TMP112:%.*]] = extractvalue { i8, i1 } [[TMP110]], 1 -// CHECK1-NEXT: br i1 [[TMP112]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] +// CHECK1-NEXT: [[TMP101:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 +// CHECK1-NEXT: [[TMP102:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i8 [[TMP98]], i8 [[TMP101]] monotonic monotonic, align 1 +// CHECK1-NEXT: [[TMP103]] = extractvalue { i8, i1 } [[TMP102]], 0 +// CHECK1-NEXT: [[TMP104:%.*]] = extractvalue { i8, i1 } [[TMP102]], 1 +// CHECK1-NEXT: br i1 [[TMP104]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] // CHECK1: atomic_exit: // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT29]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT30]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE31:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT29]], [[TMP104]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE31:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT29]], [[TMP96]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE31]], label [[OMP_ARRAYCPY_DONE32]], label [[OMP_ARRAYCPY_BODY20]] // CHECK1: omp.arraycpy.done32: -// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB3]], i32 [[TMP94]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB3]], i32 [[TMP87]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: -// CHECK1-NEXT: [[TMP113:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP113]]) -// CHECK1-NEXT: [[TMP114:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP115:%.*]] = load i32, ptr [[TMP114]], align 4 -// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4:[0-9]+]], i32 [[TMP115]]) +// CHECK1-NEXT: [[TMP105:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP105]]) +// CHECK1-NEXT: [[TMP106:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP107:%.*]] = load i32, ptr [[TMP106]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB4:[0-9]+]], i32 [[TMP107]]) // CHECK1-NEXT: ret void // // @@ -320,8 +320,8 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -332,12 +332,12 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = 
load i32, ptr [[TMP3]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP5]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -350,7 +350,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP4]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP2]], [[TMP5]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -372,7 +372,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]] @@ -381,9 +381,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP8]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8 @@ -426,65 +426,65 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr 
[[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: call void [[TMP13]](ptr [[TMP14]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]] -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: [[TMP22:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP19]], ptr [[TMP18]]) -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]] -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP31]], i64 9 -// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 -// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i64 [[LB_ADD_LEN_I]] -// CHECK1-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 -// CHECK1-NEXT: [[TMP34:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP35:%.*]] = sub i64 [[TMP33]], [[TMP34]] -// CHECK1-NEXT: [[TMP36:%.*]] = sdiv exact i64 [[TMP35]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP37:%.*]] = add nuw i64 [[TMP36]], 1 -// 
CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: store i64 [[TMP37]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !12 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP39]], ptr [[TMP25]]) -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 8 -// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[TMP43]] to i64 -// CHECK1-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP46:%.*]] = sub i64 [[TMP44]], [[TMP45]] -// CHECK1-NEXT: [[TMP47:%.*]] = sdiv exact i64 [[TMP46]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr i8, ptr [[TMP40]], i64 [[TMP47]] -// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP48]], ptr [[TMP4_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6:[0-9]+]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP15]], ptr [[TMP14]]) +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = 
getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP24:%.*]] = sext i32 [[TMP23]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP24]] +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP26]], i64 9 +// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 +// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP27]], i64 [[LB_ADD_LEN_I]] +// CHECK1-NEXT: [[TMP28:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 +// CHECK1-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP30:%.*]] = sub i64 [[TMP28]], [[TMP29]] +// CHECK1-NEXT: [[TMP31:%.*]] = sdiv exact i64 [[TMP30]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP32:%.*]] = add nuw i64 [[TMP31]], 1 +// CHECK1-NEXT: [[TMP33:%.*]] = mul nuw i64 [[TMP32]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: store i64 [[TMP32]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !13 +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP34]], ptr [[TMP20]]) +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP37]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[TMP38]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP41:%.*]] = sub i64 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: [[TMP42:%.*]] = sdiv exact i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP35]], i64 [[TMP42]] +// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP43]], ptr [[TMP4_I]], align 8, !noalias !13 // CHECK1-NEXT: ret i32 0 // // @@ -496,38 +496,38 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// 
CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 // CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP15]], i64 [[TMP18]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP15]], [[TMP21]] +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[TMP14]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP11]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE5:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: -// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP15]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP23]] to i32 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP18]] to i32 +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP19]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i8 // CHECK1-NEXT: store i8 [[CONV4]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]] +// CHECK1-NEXT: 
[[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done5: // CHECK1-NEXT: ret void diff --git a/clang/test/OpenMP/target_defaultmap_codegen_01.cpp b/clang/test/OpenMP/target_defaultmap_codegen_01.cpp --- a/clang/test/OpenMP/target_defaultmap_codegen_01.cpp +++ b/clang/test/OpenMP/target_defaultmap_codegen_01.cpp @@ -1441,10 +1441,10 @@ // CK23-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 0 // CK23-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to float** // CK23-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to float** -// CK23-DAG: store float* [[VAL:%.+]], float** [[CBP1]] -// CK23-DAG: store float* [[VAL]], float** [[CP1]] -// CK23-DAG: [[VAL]] = load float*, float** [[ADDR:%.+]], -// CK23-DAG: [[ADDR]] = load float**, float*** [[ADDR2:%.+]], +// CK23-DAG: store float* [[VAL:%.+]], float** [[CBP1]], align [[ALIGN:[0-9]]] +// CK23-DAG: store float* [[VAL]], float** [[CP1]], align [[ALIGN]] +// CK23-DAG: [[VAL]] = load float*, float** [[ADDR:%.+]], align [[ALIGN]] +// CK23-DAG: [[ADDR]] = load float**, float*** [[ADDR2:%.+]], align [[ALIGN]] // CK23: call void [[KERNEL:@.+]](float* [[VAL]]) #pragma omp target is_device_ptr(lr) defaultmap(none \ @@ -1464,10 +1464,10 @@ // CK23-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 0 // CK23-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to i32** // CK23-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** -// CK23-DAG: store i32* [[VAL:%.+]], i32** [[CBP1]] -// CK23-DAG: store i32* [[VAL]], i32** [[CP1]] -// CK23-DAG: [[VAL]] = load i32*, i32** [[ADDR:%.+]], -// CK23-DAG: [[ADDR]] = load i32**, i32*** [[ADDR2:%.+]], +// CK23-DAG: store i32* [[VAL:%.+]], i32** [[CBP1]], align [[ALIGN]] +// CK23-DAG: store i32* [[VAL]], i32** [[CP1]], align [[ALIGN]] +// CK23-DAG: [[VAL]] = load i32*, i32** [[ADDR:%.+]], align [[ALIGN]] +// CK23-DAG: [[ADDR]] = load i32**, i32*** [[ADDR2:%.+]], align [[ALIGN]] // CK23: call void [[KERNEL:@.+]](i32* [[VAL]]) #pragma omp target is_device_ptr(tr) defaultmap(none \ @@ -1487,10 +1487,10 @@ // CK23-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 0 // CK23-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to i32** // CK23-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** -// CK23-DAG: store i32* [[VAL:%.+]], i32** [[CBP1]] -// CK23-DAG: store i32* [[VAL]], i32** [[CP1]] -// CK23-DAG: [[VAL]] = load i32*, i32** [[ADDR:%.+]], -// CK23-DAG: [[ADDR]] = load i32**, i32*** [[ADDR2:%.+]], +// CK23-DAG: store i32* [[VAL:%.+]], i32** [[CBP1]], align [[ALIGN]] +// CK23-DAG: store i32* [[VAL]], i32** [[CP1]], align [[ALIGN]] +// CK23-DAG: [[VAL]] = load i32*, i32** [[ADDR:%.+]], align [[ALIGN]] +// CK23-DAG: [[ADDR]] = load i32**, i32*** [[ADDR2:%.+]], align [[ALIGN]] // CK23: call void [[KERNEL:@.+]](i32* [[VAL]]) #pragma omp target is_device_ptr(tr, lr) defaultmap(none \ @@ -1510,19 +1510,19 @@ // CK23-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 0 // CK23-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to i32** // CK23-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** -// CK23-DAG: store i32* [[VAL:%.+]], i32** [[CBP1]] -// CK23-DAG: store i32* [[VAL]], i32** [[CP1]] -// CK23-DAG: [[VAL]] = load i32*, i32** [[ADDR:%.+]], -// CK23-DAG: [[ADDR]] = load i32**, i32*** [[ADDR2:%.+]], +// CK23-DAG: store i32* [[VAL:%.+]], i32** [[CBP1]], align [[ALIGN]] +// CK23-DAG: store i32* [[VAL]], i32** [[CP1]], align [[ALIGN]] +// CK23-DAG: [[VAL]] = load 
i32*, i32** [[ADDR:%.+]], align [[ALIGN]] +// CK23-DAG: [[ADDR]] = load i32**, i32*** [[ADDR2:%.+]], align [[ALIGN]] // CK23-DAG: [[_BP1:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 1 // CK23-DAG: [[_P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 1 // CK23-DAG: [[_CBP1:%.+]] = bitcast i8** [[_BP1]] to float** // CK23-DAG: [[_CP1:%.+]] = bitcast i8** [[_P1]] to float** -// CK23-DAG: store float* [[_VAL:%.+]], float** [[_CBP1]] -// CK23-DAG: store float* [[_VAL]], float** [[_CP1]] -// CK23-DAG: [[_VAL]] = load float*, float** [[_ADDR:%.+]], -// CK23-DAG: [[_ADDR]] = load float**, float*** [[_ADDR2:%.+]], +// CK23-DAG: store float* [[_VAL:%.+]], float** [[_CBP1]], align [[ALIGN]] +// CK23-DAG: store float* [[_VAL]], float** [[_CP1]], align [[ALIGN]] +// CK23-DAG: [[_VAL]] = load float*, float** [[_ADDR:%.+]], align [[ALIGN]] +// CK23-DAG: [[_ADDR]] = load float**, float*** [[_ADDR2:%.+]], align [[ALIGN]] // CK23: call void [[KERNEL:@.+]](i32* [[VAL]], float* [[_VAL]]) #pragma omp target is_device_ptr(tr, lr) defaultmap(none \ diff --git a/clang/test/OpenMP/target_in_reduction_codegen.cpp b/clang/test/OpenMP/target_in_reduction_codegen.cpp --- a/clang/test/OpenMP/target_in_reduction_codegen.cpp +++ b/clang/test/OpenMP/target_in_reduction_codegen.cpp @@ -593,24 +593,24 @@ // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) // CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i8***)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias [[NOALIAS:![0-9]+]] +// CHECK1-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias [[NOALIAS]] +// CHECK1-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias [[NOALIAS]] +// CHECK1-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i8***)* @.omp_task_privates_map. 
to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias [[NOALIAS]] +// CHECK1-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias [[NOALIAS]] +// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias [[NOALIAS]] +// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias [[NOALIAS]] // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1 // CHECK1-NEXT: [[TMP14:%.*]] = load i64, i64* [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP15:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias [[NOALIAS]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias [[NOALIAS]] // CHECK1-NEXT: [[TMP17:%.*]] = bitcast void (i8*, ...)* [[TMP15]] to void (i8*, i8***)* // CHECK1-NEXT: call void [[TMP17]](i8* [[TMP16]], i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR3]] -// CHECK1-NEXT: [[TMP18:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP18:%.*]] = load i8**, i8*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias [[NOALIAS]] // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 0 // CHECK1-NEXT: [[TMP20:%.*]] = load i32*, i32** [[TMP19]], align 8 // CHECK1-NEXT: [[TMP21:%.*]] = load i8*, i8** [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias [[NOALIAS]] // CHECK1-NEXT: [[TMP23:%.*]] = bitcast i32* [[TMP20]] to i8* // CHECK1-NEXT: [[TMP24:%.*]] = call i8* @__kmpc_task_reduction_get_th_data(i32 [[TMP22]], i8* [[TMP21]], i8* [[TMP23]]) // CHECK1-NEXT: [[CONV_I:%.*]] = bitcast i8* [[TMP24]] to i32* diff --git a/clang/test/OpenMP/target_is_device_ptr_codegen.cpp b/clang/test/OpenMP/target_is_device_ptr_codegen.cpp --- a/clang/test/OpenMP/target_is_device_ptr_codegen.cpp +++ b/clang/test/OpenMP/target_is_device_ptr_codegen.cpp @@ -117,8 +117,8 @@ // CK1-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 0 // CK1-DAG: store ptr [[VAL:%.+]], ptr [[BP1]] // CK1-DAG: store ptr [[VAL]], ptr [[P1]] -// CK1-DAG: [[VAL]] = load ptr, ptr [[ADDR:%.+]], -// CK1-DAG: [[ADDR]] = load ptr, ptr [[ADDR2:%.+]], +// CK1-DAG: [[VAL]] = load ptr, ptr [[ADDR:%.+]], align [[ALIGN:[0-9]]] +// CK1-DAG: [[ADDR]] = load ptr, ptr [[ADDR2:%.+]], align [[ALIGN]] // CK1: call void [[KERNEL:@.+]](ptr [[VAL]]) #pragma omp target is_device_ptr(lr) @@ -137,8 +137,8 @@ // CK1-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 0 // CK1-DAG: store ptr [[VAL:%.+]], ptr [[BP1]] // CK1-DAG: store ptr [[VAL]], ptr [[P1]] -// CK1-DAG: [[VAL]] = load ptr, ptr [[ADDR:%.+]], -// CK1-DAG: [[ADDR]] = load ptr, ptr [[ADDR2:%.+]], +// CK1-DAG: [[VAL]] = load ptr, ptr [[ADDR:%.+]], align [[ALIGN2:[0-9]]] +// CK1-DAG: [[ADDR]] = load ptr, ptr [[ADDR2:%.+]], align [[ALIGN2]] // CK1: call void [[KERNEL:@.+]](ptr [[VAL]]) #pragma omp target is_device_ptr(tr) @@ -157,8 +157,8 @@ // CK1-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 0 // CK1-DAG: store ptr [[VAL:%.+]], ptr [[BP1]] // CK1-DAG: 
store ptr [[VAL]], ptr [[P1]] -// CK1-DAG: [[VAL]] = load ptr, ptr [[ADDR:%.+]], -// CK1-DAG: [[ADDR]] = load ptr, ptr [[ADDR2:%.+]], +// CK1-DAG: [[VAL]] = load ptr, ptr [[ADDR:%.+]], align [[ALIGN2]] +// CK1-DAG: [[ADDR]] = load ptr, ptr [[ADDR2:%.+]], align [[ALIGN2]] // CK1: call void [[KERNEL:@.+]](ptr [[VAL]]) #pragma omp target is_device_ptr(tr, lr) @@ -177,15 +177,15 @@ // CK1-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 0 // CK1-DAG: store ptr [[VAL:%.+]], ptr [[BP1]] // CK1-DAG: store ptr [[VAL]], ptr [[P1]] -// CK1-DAG: [[VAL]] = load ptr, ptr [[ADDR:%.+]], -// CK1-DAG: [[ADDR]] = load ptr, ptr [[ADDR2:%.+]], +// CK1-DAG: [[VAL]] = load ptr, ptr [[ADDR:%.+]], align [[ALIGN2]] +// CK1-DAG: [[ADDR]] = load ptr, ptr [[ADDR2:%.+]], align [[ALIGN2]] // CK1-DAG: [[_BP1:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 1 // CK1-DAG: [[_P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 1 // CK1-DAG: store ptr [[_VAL:%.+]], ptr [[_BP1]] // CK1-DAG: store ptr [[_VAL]], ptr [[_P1]] -// CK1-DAG: [[_VAL]] = load ptr, ptr [[_ADDR:%.+]], -// CK1-DAG: [[_ADDR]] = load ptr, ptr [[_ADDR2:%.+]], +// CK1-DAG: [[_VAL]] = load ptr, ptr [[_ADDR:%.+]], align [[ALIGN]] +// CK1-DAG: [[_ADDR]] = load ptr, ptr [[_ADDR2:%.+]], align [[ALIGN]] // CK1: call void [[KERNEL:@.+]](ptr [[VAL]], ptr [[_VAL]]) #pragma omp target is_device_ptr(tr, lr) diff --git a/clang/test/OpenMP/target_map_codegen_00.cpp b/clang/test/OpenMP/target_map_codegen_00.cpp --- a/clang/test/OpenMP/target_map_codegen_00.cpp +++ b/clang/test/OpenMP/target_map_codegen_00.cpp @@ -86,7 +86,7 @@ // CK1-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[sz]]* // CK1-DAG: store i[[sz]] [[VAL:%[^,]+]], i[[sz]]* [[CBP1]] // CK1-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP1]] -// CK1-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], +// CK1-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK1-64-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to i32* // CK1-64-DAG: store i32 {{.+}}, i32* [[CADDR]], diff --git a/clang/test/OpenMP/target_map_codegen_01.cpp b/clang/test/OpenMP/target_map_codegen_01.cpp --- a/clang/test/OpenMP/target_map_codegen_01.cpp +++ b/clang/test/OpenMP/target_map_codegen_01.cpp @@ -61,7 +61,7 @@ // CK2-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[sz]]* // CK2-DAG: store i[[sz]] [[VAL:%[^,]+]], i[[sz]]* [[CBP1]] // CK2-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP1]] -// CK2-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], +// CK2-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK2-64-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to i32* // CK2-64-DAG: store i32 {{.+}}, i32* [[CADDR]], @@ -85,8 +85,8 @@ // CK2-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** // CK2-DAG: store i32* [[VAL:%[^,]+]], i32** [[CBP1]] // CK2-DAG: store i32* [[VAL]], i32** [[CP1]] -// CK2-DAG: [[VAL]] = load i32*, i32** [[ADDR:%.+]], -// CK2-DAG: [[ADDR]] = load i32**, i32*** [[ADDR2:%.+]], +// CK2-DAG: [[VAL]] = load i32*, i32** [[ADDR:%.+]], align [[ALIGN2:[0-9]]], !noundef [[NOUNDEF]] +// CK2-DAG: [[ADDR]] = load i32**, i32*** [[ADDR2:%.+]] // CK2: call void [[KERNEL2:@.+]](i32* [[VAL]]) #pragma omp target diff --git a/clang/test/OpenMP/target_map_codegen_02.cpp b/clang/test/OpenMP/target_map_codegen_02.cpp --- a/clang/test/OpenMP/target_map_codegen_02.cpp +++ b/clang/test/OpenMP/target_map_codegen_02.cpp @@ -55,7 +55,7 @@ // CK3-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[sz]]* // CK3-DAG: store i[[sz]] 
[[VAL:%[^,]+]], i[[sz]]* [[CBP1]] // CK3-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP1]] -// CK3-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], +// CK3-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK3-64-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to i32* // CK3-64-DAG: store i32 {{.+}}, i32* [[CADDR]], diff --git a/clang/test/OpenMP/target_map_codegen_04.cpp b/clang/test/OpenMP/target_map_codegen_04.cpp --- a/clang/test/OpenMP/target_map_codegen_04.cpp +++ b/clang/test/OpenMP/target_map_codegen_04.cpp @@ -61,7 +61,7 @@ // CK5-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[sz]]* // CK5-DAG: store i[[sz]] [[VAL:%[^,]+]], i[[sz]]* [[CBP1]] // CK5-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP1]] -// CK5-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], +// CK5-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK5-64-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to i32* // CK5-64-DAG: store i32 {{.+}}, i32* [[CADDR]], diff --git a/clang/test/OpenMP/target_map_codegen_05.cpp b/clang/test/OpenMP/target_map_codegen_05.cpp --- a/clang/test/OpenMP/target_map_codegen_05.cpp +++ b/clang/test/OpenMP/target_map_codegen_05.cpp @@ -55,7 +55,7 @@ // CK6-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[sz]]* // CK6-DAG: store i[[sz]] [[VAL:%[^,]+]], i[[sz]]* [[CBP1]] // CK6-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP1]] -// CK6-64-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], +// CK6-64-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK6-64-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to i32* // CK6-64-DAG: store i32 [[GBLVAL:%.+]], i32* [[CADDR]], // CK6-64-DAG: [[GBLVAL]] = load i32, i32* [[GBL]], diff --git a/clang/test/OpenMP/target_map_codegen_07.cpp b/clang/test/OpenMP/target_map_codegen_07.cpp --- a/clang/test/OpenMP/target_map_codegen_07.cpp +++ b/clang/test/OpenMP/target_map_codegen_07.cpp @@ -56,7 +56,7 @@ // CK8-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[sz]]* // CK8-DAG: store i[[sz]] [[VAL:%[^,]+]], i[[sz]]* [[CBP1]] // CK8-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP1]] -// CK8-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], +// CK8-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK8-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to float* // CK8-DAG: store float {{.+}}, float* [[CADDR]], diff --git a/clang/test/OpenMP/target_map_codegen_11.cpp b/clang/test/OpenMP/target_map_codegen_11.cpp --- a/clang/test/OpenMP/target_map_codegen_11.cpp +++ b/clang/test/OpenMP/target_map_codegen_11.cpp @@ -62,7 +62,7 @@ // CK12-64-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[sz]]* // CK12-64-DAG: store i[[sz]] [[VAL:%[^,]+]], i[[sz]]* [[CBP1]] // CK12-64-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP1]] -// CK12-64-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], +// CK12-64-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK12-64-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to { float, float }* // CK12-64-DAG: store { float, float } {{.+}}, { float, float }* [[CADDR]], diff --git a/clang/test/OpenMP/target_map_codegen_13.cpp b/clang/test/OpenMP/target_map_codegen_13.cpp --- a/clang/test/OpenMP/target_map_codegen_13.cpp +++ b/clang/test/OpenMP/target_map_codegen_13.cpp @@ -106,7 +106,7 @@ // CK14-DAG: [[CP3:%.+]] = bitcast i8** [[P3]] to i[[sz]]* // CK14-DAG: store i[[sz]] [[VAL:%.+]], i[[sz]]* [[CBP3]] // 
CK14-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP3]] - // CK14-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], + // CK14-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK14-64-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to i32* // CK14-64-DAG: store i32 {{.+}}, i32* [[CADDR]], diff --git a/clang/test/OpenMP/target_map_codegen_14.cpp b/clang/test/OpenMP/target_map_codegen_14.cpp --- a/clang/test/OpenMP/target_map_codegen_14.cpp +++ b/clang/test/OpenMP/target_map_codegen_14.cpp @@ -118,7 +118,7 @@ // CK15-DAG: [[CP3:%.+]] = bitcast i8** [[P3]] to i[[sz]]* // CK15-DAG: store i[[sz]] [[VAL:%.+]], i[[sz]]* [[CBP3]] // CK15-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP3]] - // CK15-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], + // CK15-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK15-64-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to i32* // CK15-64-DAG: store i32 {{.+}}, i32* [[CADDR]], @@ -166,7 +166,7 @@ // CK15-DAG: [[CP3:%.+]] = bitcast i8** [[P3]] to i[[sz]]* // CK15-DAG: store i[[sz]] [[VAL:%.+]], i[[sz]]* [[CBP3]] // CK15-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP3]] - // CK15-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], + // CK15-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK15-64-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to i32* // CK15-64-DAG: store i32 {{.+}}, i32* [[CADDR]], diff --git a/clang/test/OpenMP/target_map_codegen_15.cpp b/clang/test/OpenMP/target_map_codegen_15.cpp --- a/clang/test/OpenMP/target_map_codegen_15.cpp +++ b/clang/test/OpenMP/target_map_codegen_15.cpp @@ -66,7 +66,7 @@ // CK16-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[sz]]* // CK16-DAG: store i[[sz]] [[VAL:%.+]], i[[sz]]* [[CBP1]] // CK16-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP1]] - // CK16-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], + // CK16-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK16-64-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to i32* // CK16-64-DAG: store i32 {{.+}}, i32* [[CADDR]], diff --git a/clang/test/OpenMP/target_map_codegen_17.cpp b/clang/test/OpenMP/target_map_codegen_17.cpp --- a/clang/test/OpenMP/target_map_codegen_17.cpp +++ b/clang/test/OpenMP/target_map_codegen_17.cpp @@ -65,7 +65,7 @@ // CK18-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[sz]]* // CK18-DAG: store i[[sz]] [[VAL:%.+]], i[[sz]]* [[CBP1]] // CK18-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP1]] - // CK18-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], + // CK18-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK18-64-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to i32* // CK18-64-DAG: store i32 {{.+}}, i32* [[CADDR]], diff --git a/clang/test/OpenMP/target_map_codegen_26.cpp b/clang/test/OpenMP/target_map_codegen_26.cpp --- a/clang/test/OpenMP/target_map_codegen_26.cpp +++ b/clang/test/OpenMP/target_map_codegen_26.cpp @@ -223,7 +223,7 @@ // CK27-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[Z]]* // CK27-DAG: store i[[Z]] [[VAL:%.+]], i[[Z]]* [[CBP1]] // CK27-DAG: store i[[Z]] [[VAL]], i[[Z]]* [[CP1]] -// CK27-DAG: [[VAL]] = load i[[Z]], i[[Z]]* [[ADDR:%.+]], +// CK27-DAG: [[VAL]] = load i[[Z]], i[[Z]]* [[ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK27-64-DAG: [[CADDR:%.+]] = bitcast i[[Z]]* [[ADDR]] to i32* // CK27-64-DAG: store i32 {{.+}}, i32* [[CADDR]], diff 
--git a/clang/test/OpenMP/target_map_codegen_27.cpp b/clang/test/OpenMP/target_map_codegen_27.cpp --- a/clang/test/OpenMP/target_map_codegen_27.cpp +++ b/clang/test/OpenMP/target_map_codegen_27.cpp @@ -83,7 +83,7 @@ // CK28-DAG: [[VAR0]] = load ptr, ptr [[VAR00:%.+]], // CK28-DAG: [[VAR00]] = load ptr, ptr [[VAR000:%.+]], // CK28-DAG: [[VAR1]] = getelementptr inbounds i32, ptr [[VAR11:%.+]], i{{64|32}} 2 -// CK28-DAG: [[VAR11]] = load ptr, ptr [[VAR111:%.+]], +// CK28-DAG: [[VAR11]] = load ptr, ptr [[VAR111:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK28-DAG: [[VAR111]] = load ptr, ptr [[VAR1111:%.+]], // CK28: call void [[CALL01:@.+]](ptr {{[^,]+}}) diff --git a/clang/test/OpenMP/target_map_codegen_29.cpp b/clang/test/OpenMP/target_map_codegen_29.cpp --- a/clang/test/OpenMP/target_map_codegen_29.cpp +++ b/clang/test/OpenMP/target_map_codegen_29.cpp @@ -90,7 +90,7 @@ // CK30-DAG: store ptr [[S_PTR1_BEGIN:%.+]], ptr [[PTR]], // CK30-DAG: [[S_PTR1]] = getelementptr inbounds [[STRUCT]], ptr [[S]], i32 0, i32 4 // CK30-DAG: [[S_PTR1_BEGIN]] = getelementptr inbounds i32, ptr [[S_PTR1_BEGIN_REF:%.+]], i{{64|32}} 0 -// CK30-DAG: [[S_PTR1_BEGIN_REF]] = load ptr, ptr [[S_PTR1:%.+]], +// CK30-DAG: [[S_PTR1_BEGIN_REF]] = load ptr, ptr [[S_PTR1:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK30-DAG: [[S_PTR1]] = getelementptr inbounds [[STRUCT]], ptr [[S]], i32 0, i32 4 // CK30-DAG: [[BASE_PTR:%.+]] = getelementptr inbounds [4 x ptr], ptr [[BASES]], i32 0, i32 3 @@ -99,7 +99,7 @@ // CK30-DAG: store ptr [[S_PTRBASE1_BEGIN:%.+]], ptr [[PTR]], // CK30-DAG: [[S_PTRBASE1]] = getelementptr inbounds [[BASE]], ptr [[S_BASE:%.+]], i32 0, i32 2 // CK30-DAG: [[S_PTRBASE1_BEGIN]] = getelementptr inbounds i32, ptr [[S_PTRBASE1_BEGIN_REF:%.+]], i{{64|32}} 0 -// CK30-DAG: [[S_PTRBASE1_BEGIN_REF]] = load ptr, ptr [[S_PTRBASE1:%.+]], +// CK30-DAG: [[S_PTRBASE1_BEGIN_REF]] = load ptr, ptr [[S_PTRBASE1:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK30-DAG: [[S_PTRBASE1]] = getelementptr inbounds [[BASE]], ptr [[S_BASE:%.+]], i32 0, i32 2 void map_with_deep_copy() { StructWithPtr s; diff --git a/clang/test/OpenMP/target_parallel_codegen.cpp b/clang/test/OpenMP/target_parallel_codegen.cpp --- a/clang/test/OpenMP/target_parallel_codegen.cpp +++ b/clang/test/OpenMP/target_parallel_codegen.cpp @@ -323,13 +323,13 @@ // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 // CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]] // CHECK1-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8 @@ -337,13 +337,13 @@ // CHECK1-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_omp_target_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry., i64 -1) // CHECK1-NEXT: [[TMP8:%.*]] = 
getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP7]], i32 0, i32 0 // CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP7]]) -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP10]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i64 [[TMP11]]) #[[ATTR4:[0-9]+]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP12:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP12]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP13]], ptr [[TMP14]], align 8 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -378,13 +378,13 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i64 [[TMP13]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP30]], ptr [[A_CASTED2]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = load i64, ptr [[A_CASTED2]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP31:%.*]] = load i64, ptr [[A_CASTED2]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP32:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP32]], ptr [[AA_CASTED3]], align 2 -// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[AA_CASTED3]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[AA_CASTED3]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP34]], 10 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -433,10 +433,10 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i64 [[TMP31]], i64 [[TMP33]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP54]], ptr [[A_CASTED10]], align 4 -// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[A_CASTED10]], align 8 -// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[A_CASTED10]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP11:%.*]] = icmp sgt i32 [[TMP56]], 20 // CHECK1-NEXT: br i1 [[CMP11]], label [[OMP_IF_THEN12:%.*]], label [[OMP_IF_ELSE19:%.*]] // CHECK1: omp_if.then12: @@ -536,7 +536,7 @@ // 
CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP55]], ptr [[B]], i64 [[TMP2]], ptr [[VLA]], ptr [[C]], i64 5, i64 [[TMP5]], ptr [[VLA1]], ptr [[D]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END20]] // CHECK1: omp_if.end20: -// CHECK1-NEXT: [[TMP103:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP103:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP104:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP104]]) // CHECK1-NEXT: ret i32 [[TMP103]] @@ -573,40 +573,40 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !21 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !21 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !21 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !21 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !21 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !21 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !21 -// CHECK1-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !21 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !22 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !22 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !22 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !22 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !22 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 +// CHECK1-NEXT: store 
i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !22 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP9]], align 4, !noalias !21 +// CHECK1-NEXT: store i32 0, ptr [[TMP9]], align 4, !noalias !22 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP10]], align 8, !noalias !21 +// CHECK1-NEXT: store ptr null, ptr [[TMP10]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8, !noalias !21 +// CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8, !noalias !21 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8, !noalias !21 +// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8, !noalias !21 +// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8, !noalias !21 +// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK1-NEXT: store i64 0, ptr [[TMP16]], align 8, !noalias !21 +// CHECK1-NEXT: store i64 0, ptr [[TMP16]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB1]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK1-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK1-NEXT: br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] @@ -623,9 +623,9 @@ // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..2, i64 [[TMP1]]) // CHECK1-NEXT: ret void // @@ -639,7 +639,7 @@ // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK1-NEXT: ret void @@ -651,9 +651,9 @@ // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..3, i64 [[TMP1]]) // CHECK1-NEXT: ret void // @@ -667,13 +667,13 @@ // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16 // CHECK1-NEXT: store i16 [[CONV1]], ptr [[AA_ADDR]], align 2 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP2]], i32 1) // CHECK1-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0 // CHECK1-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]] @@ -693,12 +693,12 @@ // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @.omp_outlined..4, i64 [[TMP1]], i64 [[TMP3]]) // CHECK1-NEXT: ret void // @@ -714,10 +714,10 @@ // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 @@ -748,16 +748,16 @@ // CHECK1-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 9, ptr @.omp_outlined..7, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK1-NEXT: ret void // @@ -788,45 +788,45 @@ // CHECK1-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double // CHECK1-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK1-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float // CHECK1-NEXT: store float [[CONV6]], ptr [[ARRAYIDX]], align 4 // CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK1-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double // CHECK1-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00 // CHECK1-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float // CHECK1-NEXT: store float [[CONV10]], ptr [[ARRAYIDX7]], align 4 // CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX11]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00 // CHECK1-NEXT: store double [[ADD13]], ptr [[ARRAYIDX12]], align 8 // CHECK1-NEXT: [[TMP12:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK1-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP12]] // CHECK1-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX14]], i64 3 -// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK1-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8 
// CHECK1-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1 // CHECK1-NEXT: store i64 [[ADD17]], ptr [[X]], align 8 // CHECK1-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32 // CHECK1-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1 // CHECK1-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8 @@ -842,27 +842,27 @@ // CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK1-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK1-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK1-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP8]] // // @@ -882,20 +882,20 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 
4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -972,9 +972,9 @@ // CHECK1-NEXT: [[TMP40:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP40]] // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2 +// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP41]] to i32 -// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP42]] // CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) @@ -999,16 +999,16 @@ // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK1-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i8 [[TMP4]], ptr [[AAA_CASTED]], align 1 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, 
!noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1069,7 +1069,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], ptr [[B]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP32]] // // @@ -1088,13 +1088,13 @@ // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1149,7 +1149,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i64 [[TMP1]], i64 [[TMP3]], ptr [[B]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP27]] // // @@ -1167,13 +1167,13 @@ // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 5, ptr @.omp_outlined..10, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]]) // CHECK1-NEXT: ret void // @@ -1195,17 +1195,17 @@ // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double // CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK1-NEXT: store double [[ADD]], ptr [[A]], align 8 // CHECK1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00 // CHECK1-NEXT: store double [[INC]], ptr [[A3]], align 8 // CHECK1-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 @@ -1231,15 +1231,15 @@ // CHECK1-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 4, ptr @.omp_outlined..13, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -1260,21 +1260,21 @@ // CHECK1-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK1-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK1-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV3:%.*]] = sext i8 [[TMP3]] to i32 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK1-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8 // CHECK1-NEXT: store i8 [[CONV5]], ptr [[AAA_ADDR]], align 1 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP4]], 1 // CHECK1-NEXT: store i32 [[ADD6]], ptr [[ARRAYIDX]], align 4 // CHECK1-NEXT: ret void @@ -1292,12 +1292,12 @@ // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 3, ptr @.omp_outlined..16, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -1316,16 +1316,16 @@ // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK1-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 // CHECK1-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4 // CHECK1-NEXT: ret void @@ -1370,25 +1370,25 @@ // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11:![0-9]+]] // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]] // CHECK3-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8 // CHECK3-NEXT: store i32 [[TMP3]], ptr [[__VLA_EXPR1]], align 4 // CHECK3-NEXT: [[TMP5:%.*]] = call ptr @__kmpc_omp_target_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 20, i32 1, ptr @.omp_task_entry., i64 -1) // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP5]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP5]]) -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i32 [[TMP9]]) #[[ATTR4:[0-9]+]] -// CHECK3-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP10]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[AA_CASTED]], align 4, 
!noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP11]], ptr [[TMP12]], align 4 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -1423,13 +1423,13 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i32 [[TMP11]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK3: omp_offload.cont: -// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP28]], ptr [[A_CASTED2]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[A_CASTED2]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[A_CASTED2]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP30:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP30]], ptr [[AA_CASTED3]], align 2 -// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[AA_CASTED3]], align 4 -// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[AA_CASTED3]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP32]], 10 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -1478,10 +1478,10 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l119(i32 [[TMP29]], i32 [[TMP31]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP52:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP52:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP52]], ptr [[A_CASTED10]], align 4 -// CHECK3-NEXT: [[TMP53:%.*]] = load i32, ptr [[A_CASTED10]], align 4 -// CHECK3-NEXT: [[TMP54:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP53:%.*]] = load i32, ptr [[A_CASTED10]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP54:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP11:%.*]] = icmp sgt i32 [[TMP54]], 20 // CHECK3-NEXT: br i1 [[CMP11]], label [[OMP_IF_THEN12:%.*]], label [[OMP_IF_ELSE19:%.*]] // CHECK3: omp_if.then12: @@ -1583,7 +1583,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP53]], ptr [[B]], i32 [[TMP1]], ptr [[VLA]], ptr [[C]], i32 5, i32 [[TMP3]], ptr [[VLA1]], ptr [[D]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END20]] // CHECK3: omp_if.end20: -// CHECK3-NEXT: [[TMP103:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP103:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP104:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP104]]) // CHECK3-NEXT: ret i32 [[TMP103]] @@ -1620,40 +1620,40 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: 
[[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !22 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META14:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK3-NEXT: store i32 0, ptr [[TMP9]], align 4, !noalias !22 +// CHECK3-NEXT: store i32 0, ptr [[TMP9]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP10]], align 4, !noalias !22 +// CHECK3-NEXT: store ptr null, ptr [[TMP10]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP11]], align 4, !noalias !22 +// CHECK3-NEXT: store ptr null, ptr [[TMP11]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4, 
!noalias !22 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4, !noalias !22 +// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4, !noalias !22 +// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4, !noalias !22 +// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK3-NEXT: store i64 0, ptr [[TMP16]], align 8, !noalias !22 +// CHECK3-NEXT: store i64 0, ptr [[TMP16]], align 8, !noalias !23 // CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB1]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK3-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK3-NEXT: br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] @@ -1670,9 +1670,9 @@ // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..2, i32 [[TMP1]]) // CHECK3-NEXT: ret void // @@ -1686,7 +1686,7 @@ // CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: ret void @@ -1698,9 +1698,9 @@ // CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..3, i32 [[TMP1]]) // CHECK3-NEXT: ret void // @@ -1714,13 +1714,13 @@ // CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16 // CHECK3-NEXT: store i16 [[CONV1]], ptr [[AA_ADDR]], align 2 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP2]], i32 1) // CHECK3-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0 // CHECK3-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]] @@ -1740,12 +1740,12 @@ // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @.omp_outlined..4, i32 [[TMP1]], i32 [[TMP3]]) // CHECK3-NEXT: ret void // @@ -1761,10 +1761,10 @@ // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 @@ -1795,16 +1795,16 @@ // CHECK3-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 9, ptr @.omp_outlined..7, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK3-NEXT: ret void // @@ -1835,45 +1835,45 @@ // CHECK3-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double // CHECK3-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK3-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float // CHECK3-NEXT: store float [[CONV6]], ptr [[ARRAYIDX]], align 4 // CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK3-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double // CHECK3-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00 // CHECK3-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float // CHECK3-NEXT: store float [[CONV10]], ptr [[ARRAYIDX7]], align 4 // CHECK3-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK3-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX11]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8 +// CHECK3-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00 // CHECK3-NEXT: store double [[ADD13]], ptr [[ARRAYIDX12]], align 8 // CHECK3-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK3-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP12]] // CHECK3-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX14]], i32 3 -// CHECK3-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8 +// CHECK3-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK3-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8 
// CHECK3-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1 // CHECK3-NEXT: store i64 [[ADD17]], ptr [[X]], align 4 // CHECK3-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32 // CHECK3-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1 // CHECK3-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8 @@ -1889,27 +1889,27 @@ // CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK3-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK3-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK3-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP8]] // // @@ -1929,19 +1929,19 @@ // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], 
align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -2019,9 +2019,9 @@ // CHECK3-NEXT: [[TMP40:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP40]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK3-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2 +// CHECK3-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP41]] to i32 -// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4 +// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP42]] // CHECK3-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) @@ -2046,16 +2046,16 @@ // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK3-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i8 [[TMP4]], ptr [[AAA_CASTED]], align 1 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // 
CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -2116,7 +2116,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], ptr [[B]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP32]] // // @@ -2135,13 +2135,13 @@ // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -2196,7 +2196,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181(i32 [[TMP1]], i32 [[TMP3]], ptr [[B]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP27]] // // @@ -2214,13 +2214,13 @@ // CHECK3-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 5, ptr @.omp_outlined..10, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]]) // CHECK3-NEXT: ret void // @@ -2242,17 +2242,17 @@ // CHECK3-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double // CHECK3-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK3-NEXT: store double [[ADD]], ptr [[A]], align 4 // CHECK3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00 // CHECK3-NEXT: store double [[INC]], ptr [[A3]], align 4 // CHECK3-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 @@ -2278,15 +2278,15 @@ // CHECK3-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 4, ptr @.omp_outlined..13, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -2307,21 +2307,21 @@ // CHECK3-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK3-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK3-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV3:%.*]] = sext i8 [[TMP3]] to i32 // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK3-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8 // CHECK3-NEXT: store i8 [[CONV5]], ptr [[AAA_ADDR]], align 1 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP4]], 1 // CHECK3-NEXT: store i32 [[ADD6]], ptr [[ARRAYIDX]], align 4 // CHECK3-NEXT: ret void @@ -2339,12 +2339,12 @@ // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 3, ptr @.omp_outlined..16, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -2363,16 +2363,16 @@ // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK3-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 // CHECK3-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4 // CHECK3-NEXT: ret void @@ -2408,9 +2408,9 @@ // CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13:![0-9]+]] // CHECK9-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..1, i64 [[TMP1]]) // CHECK9-NEXT: ret void // @@ -2424,13 +2424,13 @@ // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16 // CHECK9-NEXT: store i16 [[CONV1]], ptr [[AA_ADDR]], align 2 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP2]], i32 1) // CHECK9-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0 // CHECK9-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]] @@ -2450,12 +2450,12 @@ // CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK9-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @.omp_outlined..2, i64 [[TMP1]], i64 [[TMP3]]) // CHECK9-NEXT: ret void // @@ -2471,10 +2471,10 @@ // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK9-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK9-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 @@ -2505,16 +2505,16 @@ // CHECK9-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK9-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF13]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 9, ptr @.omp_outlined..3, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK9-NEXT: ret void // @@ -2545,45 +2545,45 @@ // CHECK9-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK9-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF13]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK9-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double // CHECK9-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK9-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float // CHECK9-NEXT: store float [[CONV6]], ptr [[ARRAYIDX]], align 4 // CHECK9-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK9-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double // CHECK9-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00 // CHECK9-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float // CHECK9-NEXT: store float [[CONV10]], ptr [[ARRAYIDX7]], align 4 // CHECK9-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK9-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX11]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00 // CHECK9-NEXT: store double [[ADD13]], ptr [[ARRAYIDX12]], align 8 // CHECK9-NEXT: [[TMP12:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK9-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP12]] // CHECK9-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX14]], i64 3 -// CHECK9-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK9-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8 
// CHECK9-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 8 +// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1 // CHECK9-NEXT: store i64 [[ADD17]], ptr [[X]], align 8 // CHECK9-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32 // CHECK9-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1 // CHECK9-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8 @@ -2606,15 +2606,15 @@ // CHECK9-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK9-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF13]] // CHECK9-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 4, ptr @.omp_outlined..4, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], ptr [[TMP0]]) // CHECK9-NEXT: ret void // @@ -2635,21 +2635,21 @@ // CHECK9-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK9-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK9-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK9-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 -// CHECK9-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK9-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[CONV3:%.*]] = sext i8 [[TMP3]] to i32 // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK9-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8 // CHECK9-NEXT: store i8 [[CONV5]], ptr [[AAA_ADDR]], align 1 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP4]], 1 // CHECK9-NEXT: store i32 [[ADD6]], ptr [[ARRAYIDX]], align 4 // CHECK9-NEXT: ret void @@ -2669,13 +2669,13 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF13]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF13]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 5, ptr @.omp_outlined..5, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]]) // CHECK9-NEXT: ret void // @@ -2697,17 +2697,17 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF13]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF13]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double // CHECK9-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK9-NEXT: store double [[ADD]], ptr [[A]], align 8 // CHECK9-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00 // CHECK9-NEXT: store double [[INC]], ptr [[A3]], align 8 // CHECK9-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 @@ -2730,12 +2730,12 @@ // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK9-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 3, ptr @.omp_outlined..6, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK9-NEXT: ret void // @@ -2754,16 +2754,16 @@ // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK9-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK9-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK9-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF13]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 // CHECK9-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4 // CHECK9-NEXT: ret void @@ -2792,9 +2792,9 @@ // CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14:![0-9]+]] // CHECK11-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..1, i32 [[TMP1]]) // CHECK11-NEXT: ret void // @@ -2808,13 +2808,13 @@ // CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16 // CHECK11-NEXT: store i16 [[CONV1]], ptr [[AA_ADDR]], align 2 // CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_cancel(ptr @[[GLOB1]], i32 [[TMP2]], i32 1) // CHECK11-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0 // CHECK11-NEXT: br i1 [[TMP4]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]] @@ -2834,12 +2834,12 @@ // CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK11-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 2, ptr @.omp_outlined..2, i32 [[TMP1]], i32 [[TMP3]]) // CHECK11-NEXT: ret void // @@ -2855,10 +2855,10 @@ // CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK11-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 @@ -2889,16 +2889,16 @@ // CHECK11-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK11-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF14]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 9, ptr @.omp_outlined..3, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK11-NEXT: ret void // @@ -2929,45 +2929,45 @@ // CHECK11-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK11-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF14]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK11-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double // CHECK11-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK11-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float // CHECK11-NEXT: store float [[CONV6]], ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK11-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double // CHECK11-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00 // CHECK11-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float // CHECK11-NEXT: store float [[CONV10]], ptr [[ARRAYIDX7]], align 4 // CHECK11-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK11-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX11]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8 +// CHECK11-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00 // CHECK11-NEXT: store double [[ADD13]], ptr [[ARRAYIDX12]], align 8 // CHECK11-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK11-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP12]] // CHECK11-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX14]], i32 3 -// CHECK11-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8 +// CHECK11-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK11-NEXT: store 
double [[ADD16]], ptr [[ARRAYIDX15]], align 8 // CHECK11-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1 // CHECK11-NEXT: store i64 [[ADD17]], ptr [[X]], align 4 // CHECK11-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32 // CHECK11-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1 // CHECK11-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8 @@ -2990,15 +2990,15 @@ // CHECK11-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK11-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF14]] // CHECK11-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 4, ptr @.omp_outlined..4, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], ptr [[TMP0]]) // CHECK11-NEXT: ret void // @@ -3019,21 +3019,21 @@ // CHECK11-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK11-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK11-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 -// CHECK11-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK11-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[CONV3:%.*]] = sext i8 [[TMP3]] to i32 // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK11-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8 // CHECK11-NEXT: store i8 [[CONV5]], ptr [[AAA_ADDR]], align 1 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP4]], 1 // CHECK11-NEXT: store i32 [[ADD6]], ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: ret void @@ -3053,13 +3053,13 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF14]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF14]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 5, ptr @.omp_outlined..5, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]]) // CHECK11-NEXT: ret void // @@ -3081,17 +3081,17 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF14]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF14]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double // CHECK11-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK11-NEXT: store double [[ADD]], ptr [[A]], align 4 // CHECK11-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00 // CHECK11-NEXT: store double [[INC]], ptr [[A3]], align 4 // CHECK11-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 @@ -3114,12 +3114,12 @@ // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK11-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 3, ptr @.omp_outlined..6, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK11-NEXT: ret void // @@ -3138,16 +3138,16 @@ // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK11-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK11-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF14]] // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 // CHECK11-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: ret void diff --git a/clang/test/OpenMP/target_parallel_for_codegen.cpp b/clang/test/OpenMP/target_parallel_for_codegen.cpp --- a/clang/test/OpenMP/target_parallel_for_codegen.cpp +++ b/clang/test/OpenMP/target_parallel_for_codegen.cpp @@ -351,13 +351,13 @@ // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 // CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]] // CHECK1-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8 @@ -390,23 +390,23 @@ // CHECK1: omp_offload.cont: // CHECK1-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK1-NEXT: store i64 [[CALL]], ptr [[K]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP18]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i64, ptr [[K]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP20:%.*]] = load i64, ptr [[K]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i64 [[TMP20]], ptr [[K_CASTED]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[K_CASTED]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[K_CASTED]], align 8, !noundef [[NOUNDEF10]] // 
CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i64 [[TMP19]], i64 [[TMP21]]) #[[ATTR4]] // CHECK1-NEXT: store i32 12, ptr [[LIN]], align 4 -// CHECK1-NEXT: [[TMP22:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP22:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP22]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP23:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP24]], ptr [[LIN_CASTED]], align 4 -// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[LIN_CASTED]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[LIN_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP26]], ptr [[A_CASTED2]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = load i64, ptr [[A_CASTED2]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = load i64, ptr [[A_CASTED2]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP23]], ptr [[TMP28]], align 8 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -428,18 +428,18 @@ // CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP40:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP40:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP40]], ptr [[TMP39]], align 4 // CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP42]], ptr [[TMP41]], align 4 // CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP44]], ptr [[TMP43]], align 4 // CHECK1-NEXT: [[TMP45:%.*]] = call ptr @__kmpc_omp_target_task_alloc(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i64 120, i64 12, ptr @.omp_task_entry., i64 -1) // CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP45]], i32 0, i32 0 // CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP46]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP48]], ptr align 4 [[AGG_CAPTURED]], i64 12, i1 false) // CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP45]], i32 0, i32 1 // 
CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP49]], i32 0, i32 0 @@ -449,16 +449,16 @@ // CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP49]], i32 0, i32 2 // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP52]], ptr align 8 @.offload_sizes, i64 24, i1 false) // CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP49]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP54:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP54:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP54]], ptr [[TMP53]], align 8 // CHECK1-NEXT: [[TMP55:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP0]], ptr [[TMP45]]) -// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP56]], ptr [[A_CASTED3]], align 4 -// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[A_CASTED3]], align 8 -// CHECK1-NEXT: [[TMP58:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[A_CASTED3]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP58:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP58]], ptr [[AA_CASTED4]], align 2 -// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[AA_CASTED4]], align 8 -// CHECK1-NEXT: [[TMP60:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[AA_CASTED4]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP60:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP60]], 10 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -507,15 +507,15 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l136(i64 [[TMP57]], i64 [[TMP59]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP80:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP80:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP80]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP81]], ptr [[A_CASTED11]], align 4 -// CHECK1-NEXT: [[TMP82:%.*]] = load i64, ptr [[A_CASTED11]], align 8 -// CHECK1-NEXT: [[TMP83:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP82:%.*]] = load i64, ptr [[A_CASTED11]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP83:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP83]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK1-NEXT: [[TMP84:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP84:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP85]], 20 // CHECK1-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE20:%.*]] // CHECK1: omp_if.then13: @@ -621,7 +621,7 @@ // CHECK1-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l160(i64 [[TMP82]], ptr [[B]], i64 [[TMP2]], ptr [[VLA]], ptr [[C]], i64 5, i64 [[TMP5]], ptr [[VLA1]], ptr [[D]], i64 [[TMP84]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END21]] // CHECK1: omp_if.end21: -// CHECK1-NEXT: [[TMP135:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP135:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP136:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP136]]) // CHECK1-NEXT: ret i32 [[TMP135]] @@ -653,29 +653,29 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -695,7 +695,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 // CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -720,12 +720,12 @@ // CHECK1-NEXT: 
[[K_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: store i64 [[K]], ptr [[K_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[K_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[K_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i64 [[TMP2]], ptr [[K_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[K_CASTED]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[K_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..1, i64 [[TMP1]], i64 [[TMP3]]) // CHECK1-NEXT: ret void // @@ -750,14 +750,14 @@ // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: store i64 [[K]], ptr [[K_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[K_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[K_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i64 [[TMP0]], ptr [[DOTLINEAR_START]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 8, ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]]) // CHECK1-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 35, i32 0, i32 8, i32 1, i32 1) // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] @@ -766,46 +766,46 @@ // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, 
ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] -// CHECK1-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP12]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK1-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3 // CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV]] -// CHECK1-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP12]] -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK1-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP13]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK1: omp.dispatch.inc: // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK1: omp.dispatch.end: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK1: .omp.linear.pu: -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[K1]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[K1]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i64 [[TMP14]], ptr [[K_ADDR]], align 8 // CHECK1-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK1: .omp.linear.pu.done: @@ -824,15 +824,15 @@ // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[LIN]], ptr [[LIN_ADDR]], align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i16, 
ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP2]], ptr [[LIN_CASTED]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[LIN_CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[LIN_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..2, i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) // CHECK1-NEXT: ret void // @@ -862,9 +862,9 @@ // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[LIN]], ptr [[LIN_ADDR]], align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTLINEAR_START]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START1]], align 4 // CHECK1-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK1-NEXT: store i64 [[CALL]], ptr [[DOTLINEAR_STEP]], align 8 @@ -873,50 +873,50 @@ // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP3]]) // CHECK1-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK1-NEXT: 
[[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK1-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK1-NEXT: store i64 [[SUB]], ptr [[IT]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[TMP10]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL5:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK1-NEXT: [[ADD:%.*]] = add i64 [[CONV]], [[MUL5]] // CHECK1-NEXT: [[CONV6:%.*]] = trunc i64 [[ADD]] to i32 // CHECK1-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV7:%.*]] = sext i32 [[TMP13]] to i64 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL8:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK1-NEXT: [[ADD9:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK1-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32 // CHECK1-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV11:%.*]] = sext i16 [[TMP16]] to i32 // CHECK1-NEXT: [[ADD12:%.*]] = add nsw i32 [[CONV11]], 1 // CHECK1-NEXT: [[CONV13:%.*]] = trunc i32 [[ADD12]] to i16 @@ -925,7 +925,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD14:%.*]] = add i64 [[TMP17]], 1 // CHECK1-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -933,13 +933,13 @@ // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: 
omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK1-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK1: .omp.linear.pu: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN2]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP20]], ptr [[LIN_ADDR]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP21]], ptr [[A_ADDR]], align 4 // CHECK1-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK1: .omp.linear.pu.done: @@ -996,63 +996,63 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata 
[[META23:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25 // CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]] -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !24 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 -// CHECK1-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !24 +// CHECK1-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !25 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK1-NEXT: store i32 3, ptr [[TMP18]], align 4, !noalias !24 +// CHECK1-NEXT: store i32 3, ptr [[TMP18]], align 4, !noalias !25 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP13]], ptr [[TMP19]], align 8, !noalias !24 +// CHECK1-NEXT: store ptr [[TMP13]], ptr [[TMP19]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP14]], ptr [[TMP20]], align 8, !noalias !24 +// CHECK1-NEXT: store ptr [[TMP14]], ptr [[TMP20]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK1-NEXT: store ptr [[TMP15]], ptr [[TMP21]], align 8, !noalias !24 +// CHECK1-NEXT: store ptr [[TMP15]], ptr [[TMP21]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP22]], align 8, !noalias !24 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP22]], align 8, !noalias 
!25 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8, !noalias !24 +// CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP24]], align 8, !noalias !24 +// CHECK1-NEXT: store ptr null, ptr [[TMP24]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK1-NEXT: store i64 0, ptr [[TMP25]], align 8, !noalias !24 +// CHECK1-NEXT: store i64 0, ptr [[TMP25]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP26:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l128.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK1-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0 // CHECK1-NEXT: br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK1: omp_offload.failed.i: -// CHECK1-NEXT: [[TMP28:%.*]] = load i16, ptr [[TMP12]], align 2 -// CHECK1-NEXT: store i16 [[TMP28]], ptr [[AA_CASTED_I]], align 2, !noalias !24 -// CHECK1-NEXT: [[TMP29:%.*]] = load i64, ptr [[AA_CASTED_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK1-NEXT: store i32 [[TMP30]], ptr [[LIN_CASTED_I]], align 4, !noalias !24 -// CHECK1-NEXT: [[TMP31:%.*]] = load i64, ptr [[LIN_CASTED_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK1-NEXT: store i32 [[TMP32]], ptr [[A_CASTED_I]], align 4, !noalias !24 -// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[A_CASTED_I]], align 8, !noalias !24 +// CHECK1-NEXT: [[TMP28:%.*]] = load i16, ptr [[TMP12]], align 2, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: store i16 [[TMP28]], ptr [[AA_CASTED_I]], align 2, !noalias !25 +// CHECK1-NEXT: [[TMP29:%.*]] = load i64, ptr [[AA_CASTED_I]], align 8, !noalias !25, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: store i32 [[TMP30]], ptr [[LIN_CASTED_I]], align 4, !noalias !25 +// CHECK1-NEXT: [[TMP31:%.*]] = load i64, ptr [[LIN_CASTED_I]], align 8, !noalias !25, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: store i32 [[TMP32]], ptr [[A_CASTED_I]], align 4, !noalias !25 +// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[A_CASTED_I]], align 8, !noalias !25, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l128(i64 [[TMP29]], i64 [[TMP31]], i64 [[TMP33]]) #[[ATTR4]] // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]] // CHECK1: .omp_outlined..3.exit: @@ -1068,12 +1068,12 @@ // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// 
CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..4, i64 [[TMP1]], i64 [[TMP3]]) // CHECK1-NEXT: ret void // @@ -1101,37 +1101,37 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i16 // CHECK1-NEXT: store i16 [[CONV]], ptr [[IT]], align 2 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: 
[[CONV3:%.*]] = sext i16 [[TMP9]] to i32 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK1-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 @@ -1140,7 +1140,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -1177,19 +1177,19 @@ // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..7, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i64 [[TMP11]]) // CHECK1-NEXT: ret void // @@ -1229,85 +1229,85 @@ // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 25, ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK1: omp.dispatch.cond: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK1-NEXT: br i1 
[[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 // CHECK1-NEXT: store i8 [[CONV]], ptr [[IT]], align 1 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK1-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK1-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float // CHECK1-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4 // CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK1-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +// CHECK1-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK1-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK1-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float // CHECK1-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4 // CHECK1-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK1-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 // CHECK1-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8 // CHECK1-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK1-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP23]] // CHECK1-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i64 3 -// CHECK1-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 // CHECK1-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8 // CHECK1-NEXT: [[X:%.*]] = getelementptr inbounds 
[[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 // CHECK1-NEXT: store i64 [[ADD20]], ptr [[X]], align 8 // CHECK1-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK1-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK1-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 @@ -1316,19 +1316,19 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 // CHECK1-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK1: omp.dispatch.inc: -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK1-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK1-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] @@ -1345,27 +1345,27 @@ // CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK1-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 
-// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK1-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK1-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP8]] // // @@ -1385,20 +1385,20 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1475,9 +1475,9 @@ // CHECK1-NEXT: [[TMP40:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP40]] // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2 +// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP41]] to i32 -// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // 
CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP42]] // CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) @@ -1502,16 +1502,16 @@ // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK1-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i8 [[TMP4]], ptr [[AAA_CASTED]], align 1 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1572,7 +1572,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l214(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], ptr [[B]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP32]] // // @@ -1591,13 +1591,13 @@ // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1652,7 +1652,7 @@ // CHECK1-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l197(i64 [[TMP1]], i64 [[TMP3]], ptr [[B]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP27]] // // @@ -1670,13 +1670,13 @@ // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..10, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]]) // CHECK1-NEXT: ret void // @@ -1705,48 +1705,48 @@ // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK1-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP5]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr 
[[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 // CHECK1-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK1-NEXT: store i64 [[SUB]], ptr [[IT]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK1-NEXT: store double [[ADD]], ptr [[A]], align 8 // CHECK1-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK1-NEXT: store double [[INC]], ptr [[A4]], align 8 // CHECK1-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 @@ -1758,7 +1758,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 // CHECK1-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -1784,15 +1784,15 @@ // CHECK1-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef 
[[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..13, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -1830,12 +1830,12 @@ // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..16, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -1866,49 +1866,49 @@ // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] // CHECK1-NEXT: store i64 [[ADD]], ptr [[I]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 // CHECK1-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: 
[[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK1-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP12]], 1 // CHECK1-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -1970,12 +1970,12 @@ // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11:![0-9]+]] // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]] // CHECK3-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8 // CHECK3-NEXT: store i32 [[TMP3]], ptr [[__VLA_EXPR1]], align 4 @@ -2007,20 +2007,20 @@ // CHECK3: omp_offload.cont: // CHECK3-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK3-NEXT: store i64 [[CALL]], ptr [[K]], align 8 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP16]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i32 [[TMP17]], ptr [[K]]) #[[ATTR4]] // CHECK3-NEXT: store i32 12, ptr [[LIN]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP18:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP18]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP20]], ptr [[LIN_CASTED]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[LIN_CASTED]], align 4 -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[LIN_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP22]], ptr [[A_CASTED2]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[A_CASTED2]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[A_CASTED2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP19]], ptr [[TMP24]], align 4 
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -2042,18 +2042,18 @@ // CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP36:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP36:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP36]], ptr [[TMP35]], align 4 // CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP38:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK3-NEXT: [[TMP38:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP38]], ptr [[TMP37]], align 4 // CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP40:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP40:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP40]], ptr [[TMP39]], align 4 // CHECK3-NEXT: [[TMP41:%.*]] = call ptr @__kmpc_omp_target_task_alloc(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 72, i32 12, ptr @.omp_task_entry., i64 -1) // CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP41]], i32 0, i32 0 // CHECK3-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP42]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP44:%.*]] = load ptr, ptr [[TMP43]], align 4 +// CHECK3-NEXT: [[TMP44:%.*]] = load ptr, ptr [[TMP43]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP44]], ptr align 4 [[AGG_CAPTURED]], i32 12, i1 false) // CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP41]], i32 0, i32 1 // CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP45]], i32 0, i32 0 @@ -2063,16 +2063,16 @@ // CHECK3-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP45]], i32 0, i32 2 // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP48]], ptr align 4 [[TMP34]], i32 12, i1 false) // CHECK3-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP45]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP50:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP50:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP50]], ptr [[TMP49]], align 4 // CHECK3-NEXT: [[TMP51:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP0]], ptr [[TMP41]]) -// CHECK3-NEXT: [[TMP52:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP52:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP52]], ptr [[A_CASTED3]], align 4 -// CHECK3-NEXT: [[TMP53:%.*]] = load i32, ptr [[A_CASTED3]], align 4 -// CHECK3-NEXT: [[TMP54:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP53:%.*]] = load i32, ptr [[A_CASTED3]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP54:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP54]], ptr [[AA_CASTED4]], align 2 -// CHECK3-NEXT: [[TMP55:%.*]] = load i32, ptr [[AA_CASTED4]], align 4 -// CHECK3-NEXT: 
[[TMP56:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP55:%.*]] = load i32, ptr [[AA_CASTED4]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP56:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP56]], 10 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -2121,15 +2121,15 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l136(i32 [[TMP53]], i32 [[TMP55]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP76:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP76:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP76]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP77:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP77:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP77]], ptr [[A_CASTED11]], align 4 -// CHECK3-NEXT: [[TMP78:%.*]] = load i32, ptr [[A_CASTED11]], align 4 -// CHECK3-NEXT: [[TMP79:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP78:%.*]] = load i32, ptr [[A_CASTED11]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP79:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP79]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK3-NEXT: [[TMP80:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK3-NEXT: [[TMP81:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP80:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP81:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP81]], 20 // CHECK3-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE20:%.*]] // CHECK3: omp_if.then13: @@ -2237,7 +2237,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l160(i32 [[TMP78]], ptr [[B]], i32 [[TMP1]], ptr [[VLA]], ptr [[C]], i32 5, i32 [[TMP3]], ptr [[VLA1]], ptr [[D]], i32 [[TMP80]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END21]] // CHECK3: omp_if.end21: -// CHECK3-NEXT: [[TMP133:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP133:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP134:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP134]]) // CHECK3-NEXT: ret i32 [[TMP133]] @@ -2269,29 +2269,29 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // 
CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -2311,7 +2311,7 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 // CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -2336,9 +2336,9 @@ // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: store ptr [[K]], ptr [[K_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[K_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..1, i32 [[TMP2]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -2364,14 +2364,14 @@ // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: store ptr [[K]], ptr [[K_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[K_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i64 [[TMP1]], ptr [[DOTLINEAR_START]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 8, ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK3-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 35, i32 0, i32 8, i32 1, i32 1) // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] @@ -2380,46 +2380,46 @@ // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]] -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] -// CHECK3-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP13]] -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK3-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3 // CHECK3-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64 // CHECK3-NEXT: 
[[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]] -// CHECK3-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP13]] -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK3-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP14]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP14]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK3: omp.dispatch.inc: // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK3: omp.dispatch.end: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK3-NEXT: br i1 [[TMP14]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK3: .omp.linear.pu: -// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[K1]], align 8 +// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[K1]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i64 [[TMP15]], ptr [[TMP0]], align 8 // CHECK3-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK3: .omp.linear.pu.done: @@ -2438,15 +2438,15 @@ // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[LIN]], ptr [[LIN_ADDR]], align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP2]], ptr [[LIN_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[LIN_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[LIN_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: 
[[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..2, i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) // CHECK3-NEXT: ret void // @@ -2476,9 +2476,9 @@ // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[LIN]], ptr [[LIN_ADDR]], align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTLINEAR_START]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START1]], align 4 // CHECK3-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK3-NEXT: store i64 [[CALL]], ptr [[DOTLINEAR_STEP]], align 8 @@ -2487,50 +2487,50 @@ // CHECK3-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP3]]) // CHECK3-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK3-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK3-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // 
CHECK3-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK3-NEXT: store i64 [[SUB]], ptr [[IT]], align 8 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i32 [[TMP10]] to i64 -// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8 +// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL5:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK3-NEXT: [[ADD:%.*]] = add i64 [[CONV]], [[MUL5]] // CHECK3-NEXT: [[CONV6:%.*]] = trunc i64 [[ADD]] to i32 // CHECK3-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV7:%.*]] = sext i32 [[TMP13]] to i64 -// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8 +// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL8:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK3-NEXT: [[ADD9:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK3-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32 // CHECK3-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV11:%.*]] = sext i16 [[TMP16]] to i32 // CHECK3-NEXT: [[ADD12:%.*]] = add nsw i32 [[CONV11]], 1 // CHECK3-NEXT: [[CONV13:%.*]] = trunc i32 [[ADD12]] to i16 @@ -2539,7 +2539,7 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD14:%.*]] = add i64 [[TMP17]], 1 // CHECK3-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -2547,13 +2547,13 @@ // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK3-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK3: .omp.linear.pu: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN2]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP20]], ptr [[LIN_ADDR]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP21]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK3: 
.omp.linear.pu.done: @@ -2610,63 +2610,63 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !25 +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26 // CHECK3-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], 
ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]] -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !25 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 -// CHECK3-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !25 +// CHECK3-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK3-NEXT: store i32 3, ptr [[TMP18]], align 4, !noalias !25 +// CHECK3-NEXT: store i32 3, ptr [[TMP18]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP13]], ptr [[TMP19]], align 4, !noalias !25 +// CHECK3-NEXT: store ptr [[TMP13]], ptr [[TMP19]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP14]], ptr [[TMP20]], align 4, !noalias !25 +// CHECK3-NEXT: store ptr [[TMP14]], ptr [[TMP20]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK3-NEXT: store ptr [[TMP15]], ptr [[TMP21]], align 4, !noalias !25 +// CHECK3-NEXT: store ptr [[TMP15]], ptr [[TMP21]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP22]], align 4, !noalias !25 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP22]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP23]], align 4, !noalias !25 +// CHECK3-NEXT: store ptr null, ptr [[TMP23]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP24]], align 4, !noalias !25 +// CHECK3-NEXT: store ptr null, ptr [[TMP24]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK3-NEXT: store i64 0, ptr [[TMP25]], align 8, !noalias !25 +// CHECK3-NEXT: store i64 0, ptr [[TMP25]], align 8, !noalias !26 // CHECK3-NEXT: [[TMP26:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l128.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK3-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0 // CHECK3-NEXT: br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK3: omp_offload.failed.i: -// CHECK3-NEXT: [[TMP28:%.*]] = load i16, ptr [[TMP12]], align 2 -// CHECK3-NEXT: store i16 [[TMP28]], ptr [[AA_CASTED_I]], align 2, !noalias !25 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[AA_CASTED_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK3-NEXT: store i32 [[TMP30]], ptr [[LIN_CASTED_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[LIN_CASTED_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK3-NEXT: store i32 [[TMP32]], ptr [[A_CASTED_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[A_CASTED_I]], align 4, !noalias !25 +// CHECK3-NEXT: [[TMP28:%.*]] = load i16, ptr [[TMP12]], align 2, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: store i16 [[TMP28]], ptr [[AA_CASTED_I]], align 2, !noalias !26 +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[AA_CASTED_I]], align 4, !noalias !26, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: store i32 [[TMP30]], ptr [[LIN_CASTED_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[LIN_CASTED_I]], align 4, !noalias !26, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: store i32 [[TMP32]], ptr [[A_CASTED_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[A_CASTED_I]], align 4, !noalias !26, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l128(i32 [[TMP29]], i32 [[TMP31]], i32 [[TMP33]]) #[[ATTR4]] // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]] // CHECK3: .omp_outlined..3.exit: @@ -2682,12 +2682,12 @@ // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..4, i32 [[TMP1]], i32 [[TMP3]]) // CHECK3-NEXT: ret void // @@ -2715,37 +2715,37 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i16 // CHECK3-NEXT: store i16 [[CONV]], ptr [[IT]], align 2 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK3-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV3:%.*]] = sext i16 [[TMP9]] to i32 // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK3-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 @@ -2754,7 +2754,7 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // 
CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK3-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -2791,19 +2791,19 @@ // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..7, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i32 [[TMP11]]) // CHECK3-NEXT: ret void // @@ -2843,85 +2843,85 @@ // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 25, ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK3: omp.dispatch.cond: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK3-NEXT: br i1 
[[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK3-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 // CHECK3-NEXT: store i8 [[CONV]], ptr [[IT]], align 1 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK3-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK3-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float // CHECK3-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4 // CHECK3-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK3-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK3-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK3-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float // CHECK3-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4 // CHECK3-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK3-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8 +// CHECK3-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 // CHECK3-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8 // CHECK3-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK3-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP23]] // CHECK3-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i32 3 -// CHECK3-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8 +// CHECK3-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 // CHECK3-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8 // CHECK3-NEXT: [[X:%.*]] = getelementptr inbounds 
[[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 // CHECK3-NEXT: store i64 [[ADD20]], ptr [[X]], align 4 // CHECK3-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK3-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK3-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 @@ -2930,19 +2930,19 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 // CHECK3-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK3: omp.dispatch.inc: -// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK3-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK3-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] @@ -2959,27 +2959,27 @@ // CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK3-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = 
load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK3-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK3-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP8]] // // @@ -2999,19 +2999,19 @@ // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -3089,9 +3089,9 @@ // CHECK3-NEXT: [[TMP40:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP40]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK3-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2 +// CHECK3-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP41]] to i32 -// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4 +// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP42]] // CHECK3-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 
4 // CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) @@ -3116,16 +3116,16 @@ // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK3-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i8 [[TMP4]], ptr [[AAA_CASTED]], align 1 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -3186,7 +3186,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l214(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], ptr [[B]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP32]] // // @@ -3205,13 +3205,13 @@ // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -3266,7 +3266,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l197(i32 [[TMP1]], i32 [[TMP3]], ptr [[B]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: 
omp_if.end: -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP27]] // // @@ -3284,13 +3284,13 @@ // CHECK3-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..10, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]]) // CHECK3-NEXT: ret void // @@ -3319,48 +3319,48 @@ // CHECK3-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK3-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK3-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 // CHECK3-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP5]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: 
// CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK3-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] // CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 // CHECK3-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK3-NEXT: store i64 [[SUB]], ptr [[IT]], align 8 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK3-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK3-NEXT: store double [[ADD]], ptr [[A]], align 4 // CHECK3-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK3-NEXT: store double [[INC]], ptr [[A4]], align 4 // CHECK3-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 @@ -3372,7 +3372,7 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 // CHECK3-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -3398,15 +3398,15 @@ // CHECK3-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i8, 
ptr [[AAA_ADDR]], align 1 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..13, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -3444,12 +3444,12 @@ // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..16, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -3480,49 +3480,49 @@ // CHECK3-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// 
CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] // CHECK3-NEXT: store i64 [[ADD]], ptr [[I]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK3-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 // CHECK3-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK3-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP12]], 1 // CHECK3-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -3566,29 +3566,29 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11:![0-9]+]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr 
[[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] // CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -3608,7 +3608,7 @@ // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 // CHECK9-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -3636,15 +3636,15 @@ // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[LIN]], ptr [[LIN_ADDR]], align 8 // CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP2]], ptr [[LIN_CASTED]], align 4 -// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[LIN_CASTED]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[LIN_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..1, i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) // CHECK9-NEXT: ret void // @@ -3674,9 +3674,9 @@ // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[LIN]], ptr [[LIN_ADDR]], align 8 // CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[DOTLINEAR_START]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START1]], align 4 // CHECK9-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] // CHECK9-NEXT: store i64 [[CALL]], ptr [[DOTLINEAR_STEP]], align 8 @@ -3685,50 +3685,50 @@ // CHECK9-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK9-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK9-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK9-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK9-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK9-NEXT: store i64 [[SUB]], ptr [[IT]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], 
align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV:%.*]] = sext i32 [[TMP10]] to i64 -// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL5:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK9-NEXT: [[ADD:%.*]] = add i64 [[CONV]], [[MUL5]] // CHECK9-NEXT: [[CONV6:%.*]] = trunc i64 [[ADD]] to i32 // CHECK9-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV7:%.*]] = sext i32 [[TMP13]] to i64 -// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK9-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8 +// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL8:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK9-NEXT: [[ADD9:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK9-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32 // CHECK9-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4 -// CHECK9-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV11:%.*]] = sext i16 [[TMP16]] to i32 // CHECK9-NEXT: [[ADD12:%.*]] = add nsw i32 [[CONV11]], 1 // CHECK9-NEXT: [[CONV13:%.*]] = trunc i32 [[ADD12]] to i16 @@ -3737,7 +3737,7 @@ // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD14:%.*]] = add i64 [[TMP17]], 1 // CHECK9-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -3745,13 +3745,13 @@ // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK9-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK9: .omp.linear.pu: -// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN2]], align 4 +// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN2]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP20]], ptr [[LIN_ADDR]], align 4 -// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP21]], ptr [[A_ADDR]], align 4 // CHECK9-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK9: .omp.linear.pu.done: @@ -3773,12 +3773,12 @@ // CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK9-NEXT: store i64 
[[AA]], ptr [[AA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i64 [[TMP1]], i64 [[TMP3]]) // CHECK9-NEXT: ret void // @@ -3806,37 +3806,37 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i16 // CHECK9-NEXT: store i16 [[CONV]], ptr [[IT]], align 2 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 
+// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK9-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV3:%.*]] = sext i16 [[TMP9]] to i32 // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK9-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 @@ -3845,7 +3845,7 @@ // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK9-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -3882,19 +3882,19 @@ // CHECK9-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..3, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i64 [[TMP11]]) // CHECK9-NEXT: ret void // @@ -3934,85 +3934,85 @@ // CHECK9-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 25, ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK9-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK9: omp.dispatch.cond: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK9-NEXT: br i1 
[[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK9: omp.dispatch.body: // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 // CHECK9-NEXT: store i8 [[CONV]], ptr [[IT]], align 1 -// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK9-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK9-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK9-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float // CHECK9-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4 // CHECK9-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK9-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +// CHECK9-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK9-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK9-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float // CHECK9-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4 // CHECK9-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK9-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 // CHECK9-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8 // CHECK9-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK9-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP23]] // CHECK9-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i64 3 -// CHECK9-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 // CHECK9-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8 // CHECK9-NEXT: [[X:%.*]] = getelementptr inbounds 
[[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8 +// CHECK9-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 // CHECK9-NEXT: store i64 [[ADD20]], ptr [[X]], align 8 // CHECK9-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8 +// CHECK9-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK9-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK9-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 @@ -4021,19 +4021,19 @@ // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 // CHECK9-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK9: omp.dispatch.inc: -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK9-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK9-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND]] @@ -4057,15 +4057,15 @@ // CHECK9-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK9-NEXT: 
[[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..4, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], ptr [[TMP0]]) // CHECK9-NEXT: ret void // @@ -4105,13 +4105,13 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..5, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]]) // CHECK9-NEXT: ret void // @@ -4140,48 +4140,48 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK9-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK9-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 // CHECK9-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP5]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef 
[[NOUNDEF11]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK9-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK9-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] // CHECK9-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 // CHECK9-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK9-NEXT: store i64 [[SUB]], ptr [[IT]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK9-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK9-NEXT: store double [[ADD]], ptr [[A]], align 8 // CHECK9-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK9-NEXT: store double [[INC]], ptr [[A4]], align 8 // CHECK9-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 @@ -4193,7 +4193,7 @@ // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 // CHECK9-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -4216,12 +4216,12 @@ // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP4:%.*]] = load 
i64, ptr [[AA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..6, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK9-NEXT: ret void // @@ -4252,49 +4252,49 @@ // CHECK9-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK9-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK9-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] // CHECK9-NEXT: store i64 [[ADD]], ptr [[I]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK9-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK9-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 // CHECK9-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK9-NEXT: 
[[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK9-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP12]], 1 // CHECK9-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -4331,29 +4331,29 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12:![0-9]+]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] // CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -4373,7 +4373,7 @@ // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 
4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 // CHECK11-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -4401,15 +4401,15 @@ // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[LIN]], ptr [[LIN_ADDR]], align 4 // CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP2]], ptr [[LIN_CASTED]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[LIN_CASTED]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[LIN_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..1, i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) // CHECK11-NEXT: ret void // @@ -4439,9 +4439,9 @@ // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[LIN]], ptr [[LIN_ADDR]], align 4 // CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTLINEAR_START]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START1]], align 4 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] // CHECK11-NEXT: store i64 [[CALL]], ptr [[DOTLINEAR_STEP]], align 8 @@ -4450,50 +4450,50 @@ // CHECK11-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK11-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK11-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: 
br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK11-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK11-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK11-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK11-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK11-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK11-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK11-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK11-NEXT: store i64 [[SUB]], ptr [[IT]], align 8 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[TMP10]] to i64 -// CHECK11-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK11-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8 +// CHECK11-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL5:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK11-NEXT: [[ADD:%.*]] = add i64 [[CONV]], [[MUL5]] // CHECK11-NEXT: [[CONV6:%.*]] = trunc i64 [[ADD]] to i32 // CHECK11-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV7:%.*]] = sext i32 [[TMP13]] to i64 -// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK11-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8 +// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL8:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK11-NEXT: [[ADD9:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK11-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32 // CHECK11-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV11:%.*]] = sext i16 [[TMP16]] to i32 // CHECK11-NEXT: [[ADD12:%.*]] = add nsw i32 [[CONV11]], 1 // CHECK11-NEXT: [[CONV13:%.*]] = trunc i32 
[[ADD12]] to i16 @@ -4502,7 +4502,7 @@ // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK11-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD14:%.*]] = add i64 [[TMP17]], 1 // CHECK11-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -4510,13 +4510,13 @@ // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK11-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK11: .omp.linear.pu: -// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN2]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN2]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP20]], ptr [[LIN_ADDR]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP21]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK11: .omp.linear.pu.done: @@ -4538,12 +4538,12 @@ // CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i32 [[TMP1]], i32 [[TMP3]]) // CHECK11-NEXT: ret void // @@ -4571,37 +4571,37 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK11-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i16 // CHECK11-NEXT: store i16 [[CONV]], ptr [[IT]], align 2 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK11-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV3:%.*]] = sext i16 [[TMP9]] to i32 // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK11-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 @@ -4610,7 +4610,7 @@ // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK11-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -4647,19 +4647,19 @@ // CHECK11-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..3, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i32 [[TMP11]]) // CHECK11-NEXT: ret void // @@ -4699,85 +4699,85 @@ // CHECK11-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 25, ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK11-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK11: omp.dispatch.cond: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle 
i32 [[TMP14]], [[TMP15]] // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK11: omp.dispatch.body: // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK11-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK11-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 // CHECK11-NEXT: store i8 [[CONV]], ptr [[IT]], align 1 -// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK11-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK11-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK11-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float // CHECK11-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK11-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK11-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK11-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float // CHECK11-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4 // CHECK11-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK11-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8 +// CHECK11-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 // CHECK11-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8 // CHECK11-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK11-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP23]] // CHECK11-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i32 3 -// CHECK11-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8 +// CHECK11-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 // CHECK11-NEXT: store double 
[[ADD19]], ptr [[ARRAYIDX18]], align 8 // CHECK11-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4 +// CHECK11-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 // CHECK11-NEXT: store i64 [[ADD20]], ptr [[X]], align 4 // CHECK11-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK11-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK11-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 @@ -4786,19 +4786,19 @@ // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 // CHECK11-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK11: omp.dispatch.inc: -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK11-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK11-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND]] @@ -4822,15 +4822,15 @@ // CHECK11-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF12]] // 
CHECK11-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..4, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], ptr [[TMP0]]) // CHECK11-NEXT: ret void // @@ -4870,13 +4870,13 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..5, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]]) // CHECK11-NEXT: ret void // @@ -4905,48 +4905,48 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK11-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK11-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 // CHECK11-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP5]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK11-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK11-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br 
label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK11-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK11-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK11-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK11-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] // CHECK11-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK11-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 // CHECK11-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK11-NEXT: store i64 [[SUB]], ptr [[IT]], align 8 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK11-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK11-NEXT: store double [[ADD]], ptr [[A]], align 4 // CHECK11-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK11-NEXT: store double [[INC]], ptr [[A4]], align 4 // CHECK11-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 @@ -4958,7 +4958,7 @@ // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK11-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 // CHECK11-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -4981,12 +4981,12 @@ // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP2:%.*]] = load 
i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..6, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK11-NEXT: ret void // @@ -5017,49 +5017,49 @@ // CHECK11-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK11-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK11-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK11-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK11-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK11-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK11-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK11-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK11-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] // CHECK11-NEXT: store i64 [[ADD]], ptr [[I]], align 8 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK11-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] 
to i32 // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 // CHECK11-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK11-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK11-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP12]], 1 // CHECK11-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -5115,13 +5115,13 @@ // CHECK17-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK17-NEXT: store i32 0, ptr [[A]], align 4 // CHECK17-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10:![0-9]+]] // CHECK17-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK17-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK17-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK17-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 // CHECK17-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]] // CHECK17-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8 @@ -5154,23 +5154,23 @@ // CHECK17: omp_offload.cont: // CHECK17-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK17-NEXT: store i64 [[CALL]], ptr [[K]], align 8 -// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP18]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK17-NEXT: [[TMP20:%.*]] = load i64, ptr [[K]], align 8 +// CHECK17-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP20:%.*]] = load i64, ptr [[K]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i64 [[TMP20]], ptr [[K_CASTED]], align 8 -// CHECK17-NEXT: [[TMP21:%.*]] = load i64, ptr [[K_CASTED]], align 8 +// CHECK17-NEXT: [[TMP21:%.*]] = load i64, ptr [[K_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i64 [[TMP19]], i64 [[TMP21]]) #[[ATTR4]] // CHECK17-NEXT: store i32 12, ptr [[LIN]], align 4 -// CHECK17-NEXT: [[TMP22:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK17-NEXT: [[TMP22:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i16 [[TMP22]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP23:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK17-NEXT: 
[[TMP23:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP24]], ptr [[LIN_CASTED]], align 4 -// CHECK17-NEXT: [[TMP25:%.*]] = load i64, ptr [[LIN_CASTED]], align 8 -// CHECK17-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP25:%.*]] = load i64, ptr [[LIN_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP26]], ptr [[A_CASTED2]], align 4 -// CHECK17-NEXT: [[TMP27:%.*]] = load i64, ptr [[A_CASTED2]], align 8 +// CHECK17-NEXT: [[TMP27:%.*]] = load i64, ptr [[A_CASTED2]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK17-NEXT: store i64 [[TMP23]], ptr [[TMP28]], align 8 // CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -5192,18 +5192,18 @@ // CHECK17-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP40:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK17-NEXT: [[TMP40:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i16 [[TMP40]], ptr [[TMP39]], align 4 // CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP42:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK17-NEXT: [[TMP42:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP42]], ptr [[TMP41]], align 4 // CHECK17-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP44:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP44:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP44]], ptr [[TMP43]], align 4 // CHECK17-NEXT: [[TMP45:%.*]] = call ptr @__kmpc_omp_target_task_alloc(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i64 120, i64 12, ptr @.omp_task_entry., i64 -1) // CHECK17-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP45]], i32 0, i32 0 // CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP46]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 8 +// CHECK17-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP48]], ptr align 4 [[AGG_CAPTURED]], i64 12, i1 false) // CHECK17-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP45]], i32 0, i32 1 // CHECK17-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP49]], i32 0, i32 0 @@ -5213,16 +5213,16 @@ // CHECK17-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP49]], i32 0, i32 2 // CHECK17-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP52]], ptr align 8 @.offload_sizes, i64 24, i1 false) // CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP49]], i32 0, i32 3 -// CHECK17-NEXT: 
[[TMP54:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK17-NEXT: [[TMP54:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i16 [[TMP54]], ptr [[TMP53]], align 8 // CHECK17-NEXT: [[TMP55:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP0]], ptr [[TMP45]]) -// CHECK17-NEXT: [[TMP56:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP56:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP56]], ptr [[A_CASTED3]], align 4 -// CHECK17-NEXT: [[TMP57:%.*]] = load i64, ptr [[A_CASTED3]], align 8 -// CHECK17-NEXT: [[TMP58:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK17-NEXT: [[TMP57:%.*]] = load i64, ptr [[A_CASTED3]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP58:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i16 [[TMP58]], ptr [[AA_CASTED4]], align 2 -// CHECK17-NEXT: [[TMP59:%.*]] = load i64, ptr [[AA_CASTED4]], align 8 -// CHECK17-NEXT: [[TMP60:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP59:%.*]] = load i64, ptr [[AA_CASTED4]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP60:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP60]], 10 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK17: omp_if.then: @@ -5271,15 +5271,15 @@ // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l136(i64 [[TMP57]], i64 [[TMP59]]) #[[ATTR4]] // CHECK17-NEXT: br label [[OMP_IF_END]] // CHECK17: omp_if.end: -// CHECK17-NEXT: [[TMP80:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP80:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP80]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK17-NEXT: [[TMP81:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP81:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP81]], ptr [[A_CASTED11]], align 4 -// CHECK17-NEXT: [[TMP82:%.*]] = load i64, ptr [[A_CASTED11]], align 8 -// CHECK17-NEXT: [[TMP83:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK17-NEXT: [[TMP82:%.*]] = load i64, ptr [[A_CASTED11]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP83:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP83]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK17-NEXT: [[TMP84:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: [[TMP85:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP84:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP85:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP85]], 20 // CHECK17-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE20:%.*]] // CHECK17: omp_if.then13: @@ -5385,7 +5385,7 @@ // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l160(i64 [[TMP82]], ptr [[B]], i64 [[TMP2]], ptr [[VLA]], ptr [[C]], i64 5, i64 [[TMP5]], ptr [[VLA1]], ptr [[D]], i64 [[TMP84]]) #[[ATTR4]] // CHECK17-NEXT: br label [[OMP_IF_END21]] // CHECK17: omp_if.end21: -// CHECK17-NEXT: [[TMP135:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP135:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP136:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: 
call void @llvm.stackrestore(ptr [[TMP136]]) // CHECK17-NEXT: ret i32 [[TMP135]] @@ -5417,29 +5417,29 @@ // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] // CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -5459,7 +5459,7 @@ // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 // CHECK17-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -5484,12 +5484,12 @@ // CHECK17-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK17-NEXT: store i64 [[K]], ptr [[K_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 
-// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[K_ADDR]], align 8 +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[K_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i64 [[TMP2]], ptr [[K_CASTED]], align 8 -// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[K_CASTED]], align 8 +// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[K_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..1, i64 [[TMP1]], i64 [[TMP3]]) // CHECK17-NEXT: ret void // @@ -5514,14 +5514,14 @@ // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK17-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK17-NEXT: store i64 [[K]], ptr [[K_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load i64, ptr [[K_ADDR]], align 8 +// CHECK17-NEXT: [[TMP0:%.*]] = load i64, ptr [[K_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i64 [[TMP0]], ptr [[DOTLINEAR_START]], align 8 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK17-NEXT: store i32 8, ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]]) // CHECK17-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 1073741859, i32 0, i32 8, i32 1, i32 1) // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]] @@ -5530,46 +5530,46 @@ // CHECK17-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0 // CHECK17-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK17: omp.dispatch.body: -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] -// CHECK17-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group 
[[ACC_GRP12]] -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK17-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3 // CHECK17-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV]] -// CHECK17-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP12]] -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK17-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP13]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK17-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK17-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK17-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK17: omp.dispatch.inc: // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK17: omp.dispatch.end: -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK17-NEXT: br i1 [[TMP13]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK17: .omp.linear.pu: -// CHECK17-NEXT: [[TMP14:%.*]] = load i64, ptr [[K1]], align 8 +// CHECK17-NEXT: [[TMP14:%.*]] = load i64, ptr [[K1]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i64 [[TMP14]], ptr [[K_ADDR]], align 8 // CHECK17-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK17: .omp.linear.pu.done: @@ -5588,15 +5588,15 @@ // CHECK17-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[LIN]], ptr [[LIN_ADDR]], align 8 // CHECK17-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// 
CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP2]], ptr [[LIN_CASTED]], align 4 -// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[LIN_CASTED]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[LIN_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..2, i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) // CHECK17-NEXT: ret void // @@ -5626,9 +5626,9 @@ // CHECK17-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[LIN]], ptr [[LIN_ADDR]], align 8 // CHECK17-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP0]], ptr [[DOTLINEAR_START]], align 4 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START1]], align 4 // CHECK17-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK17-NEXT: store i64 [[CALL]], ptr [[DOTLINEAR_STEP]], align 8 @@ -5637,50 +5637,50 @@ // CHECK17-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP3]]) // CHECK17-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK17-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP7:%.*]] = load i64, ptr 
[[DOTOMP_IV]], align 8 -// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK17-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK17-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK17-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK17-NEXT: store i64 [[SUB]], ptr [[IT]], align 8 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4 +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CONV:%.*]] = sext i32 [[TMP10]] to i64 -// CHECK17-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK17-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8 +// CHECK17-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[MUL5:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK17-NEXT: [[ADD:%.*]] = add i64 [[CONV]], [[MUL5]] // CHECK17-NEXT: [[CONV6:%.*]] = trunc i64 [[ADD]] to i32 // CHECK17-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4 -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4 +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CONV7:%.*]] = sext i32 [[TMP13]] to i64 -// CHECK17-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK17-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8 +// CHECK17-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[MUL8:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK17-NEXT: [[ADD9:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK17-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32 // CHECK17-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4 -// CHECK17-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CONV11:%.*]] = sext i16 [[TMP16]] to i32 // CHECK17-NEXT: [[ADD12:%.*]] = add nsw i32 [[CONV11]], 1 // CHECK17-NEXT: [[CONV13:%.*]] = trunc i32 [[ADD12]] to i16 @@ -5689,7 +5689,7 @@ // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK17-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD14:%.*]] = add i64 [[TMP17]], 1 // CHECK17-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -5697,13 +5697,13 @@ // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr 
[[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK17-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK17: .omp.linear.pu: -// CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN2]], align 4 +// CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN2]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP20]], ptr [[LIN_ADDR]], align 4 -// CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK17-NEXT: [[TMP21:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP21]], ptr [[A_ADDR]], align 4 // CHECK17-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK17: .omp.linear.pu.done: @@ -5760,63 +5760,63 @@ // CHECK17-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK17-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK17-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK17-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK17-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]]) -// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) -// CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !24 -// CHECK17-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !24 -// CHECK17-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24 -// CHECK17-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24 -// CHECK17-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !24 -// CHECK17-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !24 -// CHECK17-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !24 -// CHECK17-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24 -// CHECK17-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24 +// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) +// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) +// CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25 +// CHECK17-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, 
!noalias !25 +// CHECK17-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25 +// CHECK17-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25 +// CHECK17-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !25 +// CHECK17-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 +// CHECK17-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 +// CHECK17-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25 +// CHECK17-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25 // CHECK17-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]] -// CHECK17-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !24 -// CHECK17-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !24 -// CHECK17-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !24 -// CHECK17-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !24 +// CHECK17-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !25 +// CHECK17-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !25 +// CHECK17-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !25 +// CHECK17-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !25 // CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 // CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 -// CHECK17-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !24 +// CHECK17-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !25 // CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK17-NEXT: store i32 3, ptr [[TMP18]], align 4, !noalias !24 +// CHECK17-NEXT: store i32 3, ptr [[TMP18]], align 4, !noalias !25 // CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK17-NEXT: store ptr [[TMP13]], ptr [[TMP19]], align 8, !noalias !24 +// CHECK17-NEXT: store ptr [[TMP13]], ptr [[TMP19]], align 8, !noalias !25 // CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK17-NEXT: store ptr [[TMP14]], ptr [[TMP20]], align 8, !noalias !24 +// CHECK17-NEXT: store ptr [[TMP14]], ptr [[TMP20]], align 8, !noalias !25 // CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK17-NEXT: store ptr [[TMP15]], ptr [[TMP21]], align 8, !noalias !24 +// CHECK17-NEXT: store ptr [[TMP15]], ptr [[TMP21]], align 8, !noalias !25 // CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK17-NEXT: store ptr @.offload_maptypes, ptr [[TMP22]], align 8, !noalias !24 +// CHECK17-NEXT: store ptr @.offload_maptypes, ptr [[TMP22]], align 8, !noalias !25 // CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK17-NEXT: 
store ptr null, ptr [[TMP23]], align 8, !noalias !24 +// CHECK17-NEXT: store ptr null, ptr [[TMP23]], align 8, !noalias !25 // CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK17-NEXT: store ptr null, ptr [[TMP24]], align 8, !noalias !24 +// CHECK17-NEXT: store ptr null, ptr [[TMP24]], align 8, !noalias !25 // CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK17-NEXT: store i64 0, ptr [[TMP25]], align 8, !noalias !24 +// CHECK17-NEXT: store i64 0, ptr [[TMP25]], align 8, !noalias !25 // CHECK17-NEXT: [[TMP26:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l128.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK17-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0 // CHECK17-NEXT: br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK17: omp_offload.failed.i: -// CHECK17-NEXT: [[TMP28:%.*]] = load i16, ptr [[TMP12]], align 2 -// CHECK17-NEXT: store i16 [[TMP28]], ptr [[AA_CASTED_I]], align 2, !noalias !24 -// CHECK17-NEXT: [[TMP29:%.*]] = load i64, ptr [[AA_CASTED_I]], align 8, !noalias !24 -// CHECK17-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK17-NEXT: store i32 [[TMP30]], ptr [[LIN_CASTED_I]], align 4, !noalias !24 -// CHECK17-NEXT: [[TMP31:%.*]] = load i64, ptr [[LIN_CASTED_I]], align 8, !noalias !24 -// CHECK17-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK17-NEXT: store i32 [[TMP32]], ptr [[A_CASTED_I]], align 4, !noalias !24 -// CHECK17-NEXT: [[TMP33:%.*]] = load i64, ptr [[A_CASTED_I]], align 8, !noalias !24 +// CHECK17-NEXT: [[TMP28:%.*]] = load i16, ptr [[TMP12]], align 2, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: store i16 [[TMP28]], ptr [[AA_CASTED_I]], align 2, !noalias !25 +// CHECK17-NEXT: [[TMP29:%.*]] = load i64, ptr [[AA_CASTED_I]], align 8, !noalias !25, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: store i32 [[TMP30]], ptr [[LIN_CASTED_I]], align 4, !noalias !25 +// CHECK17-NEXT: [[TMP31:%.*]] = load i64, ptr [[LIN_CASTED_I]], align 8, !noalias !25, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: store i32 [[TMP32]], ptr [[A_CASTED_I]], align 4, !noalias !25 +// CHECK17-NEXT: [[TMP33:%.*]] = load i64, ptr [[A_CASTED_I]], align 8, !noalias !25, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l128(i64 [[TMP29]], i64 [[TMP31]], i64 [[TMP33]]) #[[ATTR4]] // CHECK17-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]] // CHECK17: .omp_outlined..3.exit: @@ -5832,12 +5832,12 @@ // CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK17-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: 
[[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..4, i64 [[TMP1]], i64 [[TMP3]]) // CHECK17-NEXT: ret void // @@ -5865,37 +5865,37 @@ // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK17-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i16 // CHECK17-NEXT: store i16 [[CONV]], ptr [[IT]], align 2 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK17-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK17-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CONV3:%.*]] = sext i16 [[TMP9]] to i32 // CHECK17-NEXT: 
[[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK17-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 @@ -5904,7 +5904,7 @@ // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK17-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -5941,19 +5941,19 @@ // CHECK17-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK17-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK17-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK17-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..7, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i64 [[TMP11]]) // CHECK17-NEXT: ret void // @@ -5993,85 +5993,85 @@ // CHECK17-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK17-NEXT: store i32 25, ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK17: omp.dispatch.cond: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP5:%.*]] = icmp sle 
i32 [[TMP14]], [[TMP15]] // CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK17: omp.dispatch.body: // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK17-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 // CHECK17-NEXT: store i8 [[CONV]], ptr [[IT]], align 1 -// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK17-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK17-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK17-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK17-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK17-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float // CHECK17-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4 // CHECK17-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK17-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +// CHECK17-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK17-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK17-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float // CHECK17-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4 // CHECK17-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK17-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i64 0, i64 2 -// CHECK17-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8 +// CHECK17-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 // CHECK17-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8 // CHECK17-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK17-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP23]] // CHECK17-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i64 3 -// CHECK17-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8 +// CHECK17-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 // CHECK17-NEXT: store double 
[[ADD19]], ptr [[ARRAYIDX18]], align 8 // CHECK17-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8 +// CHECK17-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 // CHECK17-NEXT: store i64 [[ADD20]], ptr [[X]], align 8 // CHECK17-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8 +// CHECK17-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK17-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK17-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 @@ -6080,19 +6080,19 @@ // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 // CHECK17-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK17: omp.dispatch.inc: -// CHECK17-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK17-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK17-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK17-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK17-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]] @@ -6109,27 +6109,27 @@ // CHECK17-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK17-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK17-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK17-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, 
!noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK17-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK17-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK17-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: ret i32 [[TMP8]] // // @@ -6149,20 +6149,20 @@ // CHECK17-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK17-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK17-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK17-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK17-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK17-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK17-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 // CHECK17-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4 -// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8 -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK17: omp_if.then: @@ -6239,9 +6239,9 @@ // CHECK17-NEXT: [[TMP40:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP40]] // CHECK17-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK17-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2 +// CHECK17-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2, !noundef [[NOUNDEF10]] // CHECK17-NEXT: 
[[CONV:%.*]] = sext i16 [[TMP41]] to i32 -// CHECK17-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4 +// CHECK17-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP42]] // CHECK17-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK17-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) @@ -6266,16 +6266,16 @@ // CHECK17-NEXT: store i32 0, ptr [[A]], align 4 // CHECK17-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK17-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i8 [[TMP4]], ptr [[AAA_CASTED]], align 1 -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK17: omp_if.then: @@ -6336,7 +6336,7 @@ // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l214(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], ptr [[B]]) #[[ATTR4]] // CHECK17-NEXT: br label [[OMP_IF_END]] // CHECK17: omp_if.end: -// CHECK17-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: ret i32 [[TMP32]] // // @@ -6355,13 +6355,13 @@ // CHECK17-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK17-NEXT: store i32 0, ptr [[A]], align 4 // CHECK17-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: 
[[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK17: omp_if.then: @@ -6416,7 +6416,7 @@ // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l197(i64 [[TMP1]], i64 [[TMP3]], ptr [[B]]) #[[ATTR4]] // CHECK17-NEXT: br label [[OMP_IF_END]] // CHECK17: omp_if.end: -// CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: ret i32 [[TMP27]] // // @@ -6434,13 +6434,13 @@ // CHECK17-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK17-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..10, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]]) // CHECK17-NEXT: ret void // @@ -6469,48 +6469,48 @@ // CHECK17-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK17-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK17-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK17-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 // CHECK17-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP5]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP:%.*]] = icmp 
ugt i64 [[TMP6]], 3 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK17-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK17-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] // CHECK17-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK17-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 // CHECK17-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK17-NEXT: store i64 [[SUB]], ptr [[IT]], align 8 -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK17-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK17-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK17-NEXT: store double [[ADD]], ptr [[A]], align 8 // CHECK17-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 8 +// CHECK17-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK17-NEXT: store double [[INC]], ptr [[A4]], align 8 // CHECK17-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 @@ -6522,7 +6522,7 @@ // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK17-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 // CHECK17-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -6548,15 +6548,15 @@ // CHECK17-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK17-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: 
[[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK17-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..13, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], ptr [[TMP0]]) // CHECK17-NEXT: ret void // @@ -6594,12 +6594,12 @@ // CHECK17-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK17-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK17-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..16, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK17-NEXT: ret void // @@ -6630,49 +6630,49 @@ // CHECK17-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK17-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK17-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] +// CHECK17-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] // CHECK17-NEXT: store i64 [[ADD]], ptr [[I]], align 8 -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK17-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK17-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK17-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 // CHECK17-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 
4, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK17-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK17-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP12]], 1 // CHECK17-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -6734,12 +6734,12 @@ // CHECK19-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK19-NEXT: store i32 0, ptr [[A]], align 4 // CHECK19-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11:![0-9]+]] // CHECK19-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK19-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK19-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]] // CHECK19-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8 // CHECK19-NEXT: store i32 [[TMP3]], ptr [[__VLA_EXPR1]], align 4 @@ -6771,20 +6771,20 @@ // CHECK19: omp_offload.cont: // CHECK19-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK19-NEXT: store i64 [[CALL]], ptr [[K]], align 8 -// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP16]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i32 [[TMP17]], ptr [[K]]) #[[ATTR4]] // CHECK19-NEXT: store i32 12, ptr [[LIN]], align 4 -// CHECK19-NEXT: [[TMP18:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK19-NEXT: [[TMP18:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i16 [[TMP18]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP20]], ptr [[LIN_CASTED]], align 4 -// CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[LIN_CASTED]], align 4 -// CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[LIN_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP22]], ptr [[A_CASTED2]], align 4 -// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[A_CASTED2]], align 4 +// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[A_CASTED2]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x ptr], ptr 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK19-NEXT: store i32 [[TMP19]], ptr [[TMP24]], align 4 // CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -6806,18 +6806,18 @@ // CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP36:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK19-NEXT: [[TMP36:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i16 [[TMP36]], ptr [[TMP35]], align 4 // CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP38:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK19-NEXT: [[TMP38:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP38]], ptr [[TMP37]], align 4 // CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP40:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP40:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP40]], ptr [[TMP39]], align 4 // CHECK19-NEXT: [[TMP41:%.*]] = call ptr @__kmpc_omp_target_task_alloc(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 72, i32 12, ptr @.omp_task_entry., i64 -1) // CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP41]], i32 0, i32 0 // CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP42]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP44:%.*]] = load ptr, ptr [[TMP43]], align 4 +// CHECK19-NEXT: [[TMP44:%.*]] = load ptr, ptr [[TMP43]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP44]], ptr align 4 [[AGG_CAPTURED]], i32 12, i1 false) // CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP41]], i32 0, i32 1 // CHECK19-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP45]], i32 0, i32 0 @@ -6827,16 +6827,16 @@ // CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP45]], i32 0, i32 2 // CHECK19-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP48]], ptr align 4 [[TMP34]], i32 12, i1 false) // CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP45]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP50:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK19-NEXT: [[TMP50:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i16 [[TMP50]], ptr [[TMP49]], align 4 // CHECK19-NEXT: [[TMP51:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP0]], ptr [[TMP41]]) -// CHECK19-NEXT: [[TMP52:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP52:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP52]], ptr [[A_CASTED3]], align 4 -// CHECK19-NEXT: [[TMP53:%.*]] = load i32, ptr [[A_CASTED3]], align 4 -// CHECK19-NEXT: [[TMP54:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK19-NEXT: [[TMP53:%.*]] = load i32, ptr [[A_CASTED3]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP54:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i16 
[[TMP54]], ptr [[AA_CASTED4]], align 2 -// CHECK19-NEXT: [[TMP55:%.*]] = load i32, ptr [[AA_CASTED4]], align 4 -// CHECK19-NEXT: [[TMP56:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP55:%.*]] = load i32, ptr [[AA_CASTED4]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP56:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP56]], 10 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK19: omp_if.then: @@ -6885,15 +6885,15 @@ // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l136(i32 [[TMP53]], i32 [[TMP55]]) #[[ATTR4]] // CHECK19-NEXT: br label [[OMP_IF_END]] // CHECK19: omp_if.end: -// CHECK19-NEXT: [[TMP76:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP76:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP76]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK19-NEXT: [[TMP77:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP77:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP77]], ptr [[A_CASTED11]], align 4 -// CHECK19-NEXT: [[TMP78:%.*]] = load i32, ptr [[A_CASTED11]], align 4 -// CHECK19-NEXT: [[TMP79:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK19-NEXT: [[TMP78:%.*]] = load i32, ptr [[A_CASTED11]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP79:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP79]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: [[TMP80:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: [[TMP81:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP80:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP81:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP81]], 20 // CHECK19-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE20:%.*]] // CHECK19: omp_if.then13: @@ -7001,7 +7001,7 @@ // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l160(i32 [[TMP78]], ptr [[B]], i32 [[TMP1]], ptr [[VLA]], ptr [[C]], i32 5, i32 [[TMP3]], ptr [[VLA1]], ptr [[D]], i32 [[TMP80]]) #[[ATTR4]] // CHECK19-NEXT: br label [[OMP_IF_END21]] // CHECK19: omp_if.end21: -// CHECK19-NEXT: [[TMP133:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP133:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP134:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK19-NEXT: call void @llvm.stackrestore(ptr [[TMP134]]) // CHECK19-NEXT: ret i32 [[TMP133]] @@ -7033,29 +7033,29 @@ // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP:%.*]] = icmp 
sgt i32 [[TMP2]], 5 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] // CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -7075,7 +7075,7 @@ // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 // CHECK19-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -7100,9 +7100,9 @@ // CHECK19-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK19-NEXT: store ptr [[K]], ptr [[K_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[K_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..1, i32 [[TMP2]], ptr [[TMP0]]) // CHECK19-NEXT: ret void // @@ -7128,14 +7128,14 @@ // CHECK19-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK19-NEXT: store ptr [[K]], ptr [[K_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[K_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 8 +// CHECK19-NEXT: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i64 [[TMP1]], ptr [[DOTLINEAR_START]], align 8 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK19-NEXT: store i32 8, ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK19-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 1073741859, i32 0, i32 8, i32 1, i32 1) // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]] @@ -7144,46 +7144,46 @@ // CHECK19-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0 // CHECK19-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK19: omp.dispatch.body: -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]] -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] -// CHECK19-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK19-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP13]] -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK19-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK19-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3 // CHECK19-NEXT: 
[[CONV:%.*]] = sext i32 [[MUL2]] to i64 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]] -// CHECK19-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP13]] -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK19-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP14]] +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK19-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK19-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP14]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK19-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK19: omp.dispatch.inc: // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK19: omp.dispatch.end: -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK19-NEXT: br i1 [[TMP14]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK19: .omp.linear.pu: -// CHECK19-NEXT: [[TMP15:%.*]] = load i64, ptr [[K1]], align 8 +// CHECK19-NEXT: [[TMP15:%.*]] = load i64, ptr [[K1]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i64 [[TMP15]], ptr [[TMP0]], align 8 // CHECK19-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK19: .omp.linear.pu.done: @@ -7202,15 +7202,15 @@ // CHECK19-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[LIN]], ptr [[LIN_ADDR]], align 4 // CHECK19-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP2]], ptr [[LIN_CASTED]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[LIN_CASTED]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[LIN_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4, 
!noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..2, i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) // CHECK19-NEXT: ret void // @@ -7240,9 +7240,9 @@ // CHECK19-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[LIN]], ptr [[LIN_ADDR]], align 4 // CHECK19-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP0]], ptr [[DOTLINEAR_START]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START1]], align 4 // CHECK19-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK19-NEXT: store i64 [[CALL]], ptr [[DOTLINEAR_STEP]], align 8 @@ -7251,50 +7251,50 @@ // CHECK19-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP3]]) // CHECK19-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK19-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK19-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK19-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK19-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK19-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK19-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// 
CHECK19-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK19-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK19-NEXT: store i64 [[SUB]], ptr [[IT]], align 8 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4 +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CONV:%.*]] = sext i32 [[TMP10]] to i64 -// CHECK19-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK19-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8 +// CHECK19-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[MUL5:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK19-NEXT: [[ADD:%.*]] = add i64 [[CONV]], [[MUL5]] // CHECK19-NEXT: [[CONV6:%.*]] = trunc i64 [[ADD]] to i32 // CHECK19-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4 -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CONV7:%.*]] = sext i32 [[TMP13]] to i64 -// CHECK19-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK19-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8 +// CHECK19-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[MUL8:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK19-NEXT: [[ADD9:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK19-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32 // CHECK19-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4 -// CHECK19-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CONV11:%.*]] = sext i16 [[TMP16]] to i32 // CHECK19-NEXT: [[ADD12:%.*]] = add nsw i32 [[CONV11]], 1 // CHECK19-NEXT: [[CONV13:%.*]] = trunc i32 [[ADD12]] to i16 @@ -7303,7 +7303,7 @@ // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK19-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD14:%.*]] = add i64 [[TMP17]], 1 // CHECK19-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -7311,13 +7311,13 @@ // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK19-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK19: .omp.linear.pu: -// CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN2]], align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN2]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP20]], ptr [[LIN_ADDR]], align 4 -// CHECK19-NEXT: [[TMP21:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK19-NEXT: 
[[TMP21:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP21]], ptr [[A_ADDR]], align 4 // CHECK19-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK19: .omp.linear.pu.done: @@ -7374,63 +7374,63 @@ // CHECK19-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4 // CHECK19-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK19-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK19-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 +// CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) -// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) -// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) -// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25 -// CHECK19-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !25 -// CHECK19-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !25 -// CHECK19-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !25 -// CHECK19-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !25 -// CHECK19-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !25 -// CHECK19-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !25 -// CHECK19-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !25 -// CHECK19-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !25 +// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) +// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) +// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) +// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26 +// CHECK19-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !26 +// CHECK19-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26 +// CHECK19-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26 +// CHECK19-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !26 +// CHECK19-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !26 +// CHECK19-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !26 +// CHECK19-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], 
align 4, !noalias !26 +// CHECK19-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26 // CHECK19-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]] -// CHECK19-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !25 -// CHECK19-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !25 -// CHECK19-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !25 -// CHECK19-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !25 +// CHECK19-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !26 +// CHECK19-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !26 +// CHECK19-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !26 +// CHECK19-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !26 // CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 // CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 -// CHECK19-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !25 +// CHECK19-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !26 // CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK19-NEXT: store i32 3, ptr [[TMP18]], align 4, !noalias !25 +// CHECK19-NEXT: store i32 3, ptr [[TMP18]], align 4, !noalias !26 // CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK19-NEXT: store ptr [[TMP13]], ptr [[TMP19]], align 4, !noalias !25 +// CHECK19-NEXT: store ptr [[TMP13]], ptr [[TMP19]], align 4, !noalias !26 // CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK19-NEXT: store ptr [[TMP14]], ptr [[TMP20]], align 4, !noalias !25 +// CHECK19-NEXT: store ptr [[TMP14]], ptr [[TMP20]], align 4, !noalias !26 // CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK19-NEXT: store ptr [[TMP15]], ptr [[TMP21]], align 4, !noalias !25 +// CHECK19-NEXT: store ptr [[TMP15]], ptr [[TMP21]], align 4, !noalias !26 // CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK19-NEXT: store ptr @.offload_maptypes, ptr [[TMP22]], align 4, !noalias !25 +// CHECK19-NEXT: store ptr @.offload_maptypes, ptr [[TMP22]], align 4, !noalias !26 // CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK19-NEXT: store ptr null, ptr [[TMP23]], align 4, !noalias !25 +// CHECK19-NEXT: store ptr null, ptr [[TMP23]], align 4, !noalias !26 // CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK19-NEXT: store ptr null, ptr [[TMP24]], align 4, !noalias !25 +// CHECK19-NEXT: store ptr null, ptr [[TMP24]], align 4, !noalias !26 // CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK19-NEXT: store 
i64 0, ptr [[TMP25]], align 8, !noalias !25 +// CHECK19-NEXT: store i64 0, ptr [[TMP25]], align 8, !noalias !26 // CHECK19-NEXT: [[TMP26:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l128.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK19-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0 // CHECK19-NEXT: br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK19: omp_offload.failed.i: -// CHECK19-NEXT: [[TMP28:%.*]] = load i16, ptr [[TMP12]], align 2 -// CHECK19-NEXT: store i16 [[TMP28]], ptr [[AA_CASTED_I]], align 2, !noalias !25 -// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[AA_CASTED_I]], align 4, !noalias !25 -// CHECK19-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK19-NEXT: store i32 [[TMP30]], ptr [[LIN_CASTED_I]], align 4, !noalias !25 -// CHECK19-NEXT: [[TMP31:%.*]] = load i32, ptr [[LIN_CASTED_I]], align 4, !noalias !25 -// CHECK19-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK19-NEXT: store i32 [[TMP32]], ptr [[A_CASTED_I]], align 4, !noalias !25 -// CHECK19-NEXT: [[TMP33:%.*]] = load i32, ptr [[A_CASTED_I]], align 4, !noalias !25 +// CHECK19-NEXT: [[TMP28:%.*]] = load i16, ptr [[TMP12]], align 2, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: store i16 [[TMP28]], ptr [[AA_CASTED_I]], align 2, !noalias !26 +// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[AA_CASTED_I]], align 4, !noalias !26, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: store i32 [[TMP30]], ptr [[LIN_CASTED_I]], align 4, !noalias !26 +// CHECK19-NEXT: [[TMP31:%.*]] = load i32, ptr [[LIN_CASTED_I]], align 4, !noalias !26, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: store i32 [[TMP32]], ptr [[A_CASTED_I]], align 4, !noalias !26 +// CHECK19-NEXT: [[TMP33:%.*]] = load i32, ptr [[A_CASTED_I]], align 4, !noalias !26, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l128(i32 [[TMP29]], i32 [[TMP31]], i32 [[TMP33]]) #[[ATTR4]] // CHECK19-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]] // CHECK19: .omp_outlined..3.exit: @@ -7446,12 +7446,12 @@ // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK19-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..4, i32 [[TMP1]], i32 [[TMP3]]) // CHECK19-NEXT: ret void // @@ -7479,37 +7479,37 @@ // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK19-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i16 // CHECK19-NEXT: store i16 [[CONV]], ptr [[IT]], align 2 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK19-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK19-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CONV3:%.*]] = sext i16 [[TMP9]] to i32 // CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK19-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 @@ -7518,7 +7518,7 @@ // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK19-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -7555,19 +7555,19 @@ // CHECK19-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..7, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i32 [[TMP11]]) // CHECK19-NEXT: ret void // @@ -7607,85 +7607,85 @@ // CHECK19-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK19-NEXT: store i32 25, ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK19: omp.dispatch.cond: -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle 
i32 [[TMP14]], [[TMP15]] // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK19: omp.dispatch.body: // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK19-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK19-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 // CHECK19-NEXT: store i8 [[CONV]], ptr [[IT]], align 1 -// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK19-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK19-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK19-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float // CHECK19-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4 // CHECK19-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK19-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4 +// CHECK19-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK19-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK19-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float // CHECK19-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4 // CHECK19-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK19-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8 +// CHECK19-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 // CHECK19-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8 // CHECK19-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK19-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP23]] // CHECK19-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i32 3 -// CHECK19-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8 +// CHECK19-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 // CHECK19-NEXT: store double 
[[ADD19]], ptr [[ARRAYIDX18]], align 8 // CHECK19-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4 +// CHECK19-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 // CHECK19-NEXT: store i64 [[ADD20]], ptr [[X]], align 4 // CHECK19-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4 +// CHECK19-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK19-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK19-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 @@ -7694,19 +7694,19 @@ // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 // CHECK19-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK19: omp.dispatch.inc: -// CHECK19-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK19-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK19-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK19-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK19-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]] @@ -7723,27 +7723,27 @@ // CHECK19-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK19-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK19-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK19-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // 
CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK19-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK19-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK19-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: ret i32 [[TMP8]] // // @@ -7763,19 +7763,19 @@ // CHECK19-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK19-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK19-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK19-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK19-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK19-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 // CHECK19-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK19: omp_if.then: @@ -7853,9 +7853,9 @@ // CHECK19-NEXT: [[TMP40:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP40]] // CHECK19-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK19-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2 +// CHECK19-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP41]] to i32 -// CHECK19-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4 +// CHECK19-NEXT: 
[[TMP42:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP42]] // CHECK19-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK19-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) @@ -7880,16 +7880,16 @@ // CHECK19-NEXT: store i32 0, ptr [[A]], align 4 // CHECK19-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK19-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i8 [[TMP4]], ptr [[AAA_CASTED]], align 1 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK19: omp_if.then: @@ -7950,7 +7950,7 @@ // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l214(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], ptr [[B]]) #[[ATTR4]] // CHECK19-NEXT: br label [[OMP_IF_END]] // CHECK19: omp_if.end: -// CHECK19-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: ret i32 [[TMP32]] // // @@ -7969,13 +7969,13 @@ // CHECK19-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK19-NEXT: store i32 0, ptr [[A]], align 4 // CHECK19-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label 
[[OMP_IF_ELSE:%.*]] // CHECK19: omp_if.then: @@ -8030,7 +8030,7 @@ // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l197(i32 [[TMP1]], i32 [[TMP3]], ptr [[B]]) #[[ATTR4]] // CHECK19-NEXT: br label [[OMP_IF_END]] // CHECK19: omp_if.end: -// CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: ret i32 [[TMP27]] // // @@ -8048,13 +8048,13 @@ // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK19-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..10, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]]) // CHECK19-NEXT: ret void // @@ -8083,48 +8083,48 @@ // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK19-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK19-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK19-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 // CHECK19-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP5]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK19-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // 
CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK19-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK19-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK19-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK19-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] // CHECK19-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK19-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 // CHECK19-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK19-NEXT: store i64 [[SUB]], ptr [[IT]], align 8 -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK19-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK19-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK19-NEXT: store double [[ADD]], ptr [[A]], align 4 // CHECK19-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK19-NEXT: store double [[INC]], ptr [[A4]], align 4 // CHECK19-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 @@ -8136,7 +8136,7 @@ // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK19-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 // CHECK19-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -8162,15 +8162,15 @@ // CHECK19-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK19-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 
2 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..13, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], ptr [[TMP0]]) // CHECK19-NEXT: ret void // @@ -8208,12 +8208,12 @@ // CHECK19-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK19-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..16, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK19-NEXT: ret void // @@ -8244,49 +8244,49 @@ // CHECK19-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK19-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK19-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK19-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK19-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK19-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK19-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK19-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] // CHECK19-NEXT: store i64 [[ADD]], ptr [[I]], align 8 -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK19-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK19-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 // CHECK19-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 
4, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK19-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK19-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP12]], 1 // CHECK19-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]] diff --git a/clang/test/OpenMP/target_parallel_for_reduction_task_codegen.cpp b/clang/test/OpenMP/target_parallel_for_reduction_task_codegen.cpp --- a/clang/test/OpenMP/target_parallel_for_reduction_task_codegen.cpp +++ b/clang/test/OpenMP/target_parallel_for_reduction_task_codegen.cpp @@ -42,7 +42,7 @@ // CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l14(ptr [[ARGC_ADDR]], ptr [[TMP0]]) #[[ATTR6:[0-9]+]] // CHECK1-NEXT: ret i32 0 // @@ -55,7 +55,7 @@ // CHECK1-NEXT: store ptr [[ARGC]], ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1:[0-9]+]], i32 2, ptr @.omp_outlined., ptr [[TMP0]], ptr [[TMP1]]) // CHECK1-NEXT: ret void // @@ -95,14 +95,14 @@ // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store i32 0, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP1]], i64 0 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 0 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64 // CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP4]] -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds ptr, ptr [[TMP5]], i64 9 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[ARRAYIDX3]], align 8 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[LB_ADD_LEN]] @@ -138,198 +138,198 @@ // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 0 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 -// CHECK1-NEXT: store i64 4, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 -// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP30]], i8 0, i64 4, i1 false) +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 +// CHECK1-NEXT: store i64 4, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr 
[[DOTRD_INPUT_GEP_]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 +// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 4, i1 false) // CHECK1-NEXT: [[DOTRD_INPUT_GEP_7:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds ptr, ptr [[TMP33]], i64 0 -// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARRAYIDX8]], align 8 -// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, ptr [[TMP34]], i64 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP36]] -// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds ptr, ptr [[TMP37]], i64 9 -// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[ARRAYIDX11]], align 8 -// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, ptr [[TMP38]], i64 [[LB_ADD_LEN10]] -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARRAYIDX9]], ptr [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[ARRAYIDX12]] to i64 -// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[ARRAYIDX9]] to i64 -// CHECK1-NEXT: [[TMP42:%.*]] = sub i64 [[TMP40]], [[TMP41]] -// CHECK1-NEXT: [[TMP43:%.*]] = sdiv exact i64 [[TMP42]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP44:%.*]] = add nuw i64 [[TMP43]], 1 -// CHECK1-NEXT: [[TMP45:%.*]] = mul nuw i64 [[TMP44]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 2 -// CHECK1-NEXT: store i64 [[TMP45]], ptr [[TMP46]], align 8 -// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP47]], align 8 -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP48]], align 8 -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP49]], align 8 -// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 6 -// CHECK1-NEXT: store i32 1, ptr [[TMP50]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds 
ptr, ptr [[TMP30]], i64 0 +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[ARRAYIDX8]], align 8 +// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, ptr [[TMP31]], i64 0 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP33:%.*]] = sext i32 [[TMP32]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP33]] +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds ptr, ptr [[TMP34]], i64 9 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[ARRAYIDX11]], align 8 +// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, ptr [[TMP35]], i64 [[LB_ADD_LEN10]] +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARRAYIDX9]], ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP37:%.*]] = ptrtoint ptr [[ARRAYIDX12]] to i64 +// CHECK1-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[ARRAYIDX9]] to i64 +// CHECK1-NEXT: [[TMP39:%.*]] = sub i64 [[TMP37]], [[TMP38]] +// CHECK1-NEXT: [[TMP40:%.*]] = sdiv exact i64 [[TMP39]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP41:%.*]] = add nuw i64 [[TMP40]], 1 +// CHECK1-NEXT: [[TMP42:%.*]] = mul nuw i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 2 +// CHECK1-NEXT: store i64 [[TMP42]], ptr [[TMP43]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP44]], align 8 +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 6 +// CHECK1-NEXT: store i32 1, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP48]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP50:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP49]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) +// CHECK1-NEXT: store ptr [[TMP50]], ptr [[DOTTASK_RED_]], align 8 // CHECK1-NEXT: [[TMP51:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[TMP51]], align 4 -// CHECK1-NEXT: [[TMP54:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP52]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) -// CHECK1-NEXT: store ptr [[TMP54]], ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: [[TMP55:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[TMP55]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB2:[0-9]+]], i32 [[TMP56]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr 
[[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP57]], 9 +// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[TMP51]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB2:[0-9]+]], i32 [[TMP52]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) +// CHECK1-NEXT: [[TMP53:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP53]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP58]], [[COND_FALSE]] ] +// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP54]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 -// CHECK1-NEXT: store i64 [[TMP59]], ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i64 [[TMP55]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP61:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP60]], [[TMP61]] +// CHECK1-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP56]], [[TMP57]] // CHECK1-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK1: omp.inner.for.cond.cleanup: // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP62:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP62]], 1 +// CHECK1-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP58]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL]] // CHECK1-NEXT: store i64 [[ADD]], ptr [[I]], align 8 -// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP63]], align 8 -// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP64]], align 8 -// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP66:%.*]] = load ptr, ptr [[_TMP5]], align 8 -// CHECK1-NEXT: store ptr [[TMP66]], ptr [[TMP65]], align 8 -// CHECK1-NEXT: [[TMP67:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP68:%.*]] = load i32, ptr [[TMP67]], align 4 -// CHECK1-NEXT: [[TMP69:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP68]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
-// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP69]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP71]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP73:%.*]] = load ptr, ptr [[TMP72]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP73]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) -// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP69]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP75]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP77:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: store ptr [[TMP77]], ptr [[TMP76]], align 8 -// CHECK1-NEXT: [[TMP78:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP79:%.*]] = load i32, ptr [[TMP78]], align 4 -// CHECK1-NEXT: [[TMP80:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP79]], ptr [[TMP69]]) +// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP59]], align 8 +// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP60]], align 8 +// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP62:%.*]] = load ptr, ptr [[_TMP5]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP62]], ptr [[TMP61]], align 8 +// CHECK1-NEXT: [[TMP63:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP64:%.*]] = load i32, ptr [[TMP63]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP65:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP64]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
+// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP65]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP66]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP68:%.*]] = load ptr, ptr [[TMP67]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP68]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) +// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP65]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP69]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP71:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP71]], ptr [[TMP70]], align 8 +// CHECK1-NEXT: [[TMP72:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP73:%.*]] = load i32, ptr [[TMP72]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP74:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP73]], ptr [[TMP65]]) // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP81:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP81]], 1 +// CHECK1-NEXT: [[TMP75:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP75]], 1 // CHECK1-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: -// CHECK1-NEXT: [[TMP82:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP83:%.*]] = load i32, ptr [[TMP82]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP83]]) +// CHECK1-NEXT: [[TMP76:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP77:%.*]] = load i32, ptr [[TMP76]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP77]]) +// CHECK1-NEXT: [[TMP78:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP79:%.*]] = load i32, ptr [[TMP78]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP79]], i32 1) +// CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP80]], align 8 +// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP81]], align 8 +// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 +// CHECK1-NEXT: [[TMP83:%.*]] = inttoptr i64 [[TMP11]] to ptr +// CHECK1-NEXT: store ptr [[TMP83]], ptr [[TMP82]], align 8 // CHECK1-NEXT: [[TMP84:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[TMP84]], align 4 -// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP85]], i32 1) -// CHECK1-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP86]], align 8 -// CHECK1-NEXT: 
[[TMP88:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP88]], align 8 -// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP90:%.*]] = inttoptr i64 [[TMP11]] to ptr -// CHECK1-NEXT: store ptr [[TMP90]], ptr [[TMP89]], align 8 -// CHECK1-NEXT: [[TMP91:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP92:%.*]] = load i32, ptr [[TMP91]], align 4 -// CHECK1-NEXT: [[TMP94:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP92]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP94]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[TMP84]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP86:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP85]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP86]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP95:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP96:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP95]], [[TMP96]] +// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP88:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP87]], [[TMP88]] // CHECK1-NEXT: store i32 [[ADD15]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP97]] +// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP89]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE22:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST16:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT20:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP98:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP98]] to i32 -// CHECK1-NEXT: [[TMP99:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP99]] to i32 +// CHECK1-NEXT: [[TMP90:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP90]] to i32 +// CHECK1-NEXT: [[TMP91:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP91]] to i32 // CHECK1-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV]], [[CONV17]] // CHECK1-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8 // CHECK1-NEXT: store i8 [[CONV19]], ptr 
[[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT20]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP97]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP89]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_DONE22]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done22: -// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP92]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP85]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP100:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP101:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP100]] monotonic, align 4 -// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP102]] +// CHECK1-NEXT: [[TMP92:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP93:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP92]] monotonic, align 4 +// CHECK1-NEXT: [[TMP94:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP94]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY23]], label [[OMP_ARRAYCPY_DONE36:%.*]], label [[OMP_ARRAYCPY_BODY24:%.*]] // CHECK1: omp.arraycpy.body24: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST25:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT34:%.*]], [[ATOMIC_EXIT:%.*]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST26:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT33:%.*]], [[ATOMIC_EXIT]] ] -// CHECK1-NEXT: [[TMP103:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1 -// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP103]] to i32 +// CHECK1-NEXT: [[TMP95:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP95]] to i32 // CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]] monotonic, align 1 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] // CHECK1: atomic_cont: -// CHECK1-NEXT: [[TMP104:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP109:%.*]], [[ATOMIC_CONT]] ] -// CHECK1-NEXT: store i8 [[TMP104]], ptr [[_TMP28]], align 1 -// CHECK1-NEXT: [[TMP105:%.*]] = load i8, ptr [[_TMP28]], align 1 -// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP105]] to i32 -// CHECK1-NEXT: [[TMP106:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1 -// CHECK1-NEXT: [[CONV30:%.*]] = sext i8 [[TMP106]] to i32 +// CHECK1-NEXT: [[TMP96:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP101:%.*]], [[ATOMIC_CONT]] ] +// CHECK1-NEXT: store i8 [[TMP96]], ptr [[_TMP28]], align 1 +// CHECK1-NEXT: [[TMP97:%.*]] = load i8, ptr [[_TMP28]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP97]] to i32 +// CHECK1-NEXT: [[TMP98:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: 
[[CONV30:%.*]] = sext i8 [[TMP98]] to i32 // CHECK1-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV29]], [[CONV30]] // CHECK1-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8 // CHECK1-NEXT: store i8 [[CONV32]], ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP107:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP108:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP104]], i8 [[TMP107]] monotonic monotonic, align 1 -// CHECK1-NEXT: [[TMP109]] = extractvalue { i8, i1 } [[TMP108]], 0 -// CHECK1-NEXT: [[TMP110:%.*]] = extractvalue { i8, i1 } [[TMP108]], 1 -// CHECK1-NEXT: br i1 [[TMP110]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] +// CHECK1-NEXT: [[TMP99:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 +// CHECK1-NEXT: [[TMP100:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP96]], i8 [[TMP99]] monotonic monotonic, align 1 +// CHECK1-NEXT: [[TMP101]] = extractvalue { i8, i1 } [[TMP100]], 0 +// CHECK1-NEXT: [[TMP102:%.*]] = extractvalue { i8, i1 } [[TMP100]], 1 +// CHECK1-NEXT: br i1 [[TMP102]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] // CHECK1: atomic_exit: // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT33]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT34]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP102]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP94]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_DONE36]], label [[OMP_ARRAYCPY_BODY24]] // CHECK1: omp.arraycpy.done36: // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: -// CHECK1-NEXT: [[TMP111:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP111]]) +// CHECK1-NEXT: [[TMP103:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP103]]) // CHECK1-NEXT: ret void // // @@ -340,8 +340,8 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -352,12 +352,12 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP5]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], 
align 4 // CHECK1-NEXT: ret void // // @@ -370,7 +370,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP4]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP2]], [[TMP5]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -392,7 +392,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]] @@ -401,9 +401,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP8]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8 @@ -446,65 +446,65 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void 
@llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: call void [[TMP13]](ptr [[TMP14]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: [[TMP22:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP19]], ptr [[TMP18]]) -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]] -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP31]], i64 9 -// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 -// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i64 [[LB_ADD_LEN_I]] -// CHECK1-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 -// CHECK1-NEXT: [[TMP34:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP35:%.*]] = sub i64 [[TMP33]], [[TMP34]] -// CHECK1-NEXT: [[TMP36:%.*]] = sdiv exact i64 [[TMP35]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP37:%.*]] = add nuw i64 [[TMP36]], 1 -// CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: store i64 [[TMP37]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !12 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP39]], ptr [[TMP25]]) -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 8 -// 
CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[TMP43]] to i64 -// CHECK1-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP46:%.*]] = sub i64 [[TMP44]], [[TMP45]] -// CHECK1-NEXT: [[TMP47:%.*]] = sdiv exact i64 [[TMP46]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr i8, ptr [[TMP40]], i64 [[TMP47]] -// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP48]], ptr [[TMP4_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP15]], ptr [[TMP14]]) +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP24:%.*]] = sext i32 [[TMP23]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP24]] +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP26]], i64 9 +// CHECK1-NEXT: 
[[TMP27:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 +// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP27]], i64 [[LB_ADD_LEN_I]] +// CHECK1-NEXT: [[TMP28:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 +// CHECK1-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP30:%.*]] = sub i64 [[TMP28]], [[TMP29]] +// CHECK1-NEXT: [[TMP31:%.*]] = sdiv exact i64 [[TMP30]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP32:%.*]] = add nuw i64 [[TMP31]], 1 +// CHECK1-NEXT: [[TMP33:%.*]] = mul nuw i64 [[TMP32]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: store i64 [[TMP32]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !13 +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP34]], ptr [[TMP20]]) +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP37]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[TMP38]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP41:%.*]] = sub i64 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: [[TMP42:%.*]] = sdiv exact i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP35]], i64 [[TMP42]] +// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP43]], ptr [[TMP4_I]], align 8, !noalias !13 // CHECK1-NEXT: ret i32 0 // // @@ -516,38 +516,38 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 // CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// 
CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP15]], i64 [[TMP18]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP15]], [[TMP21]] +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[TMP14]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP11]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE5:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: -// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP15]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP23]] to i32 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP18]] to i32 +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP19]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i8 // CHECK1-NEXT: store i8 [[CONV4]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done5: // CHECK1-NEXT: ret void diff --git a/clang/test/OpenMP/target_parallel_for_simd_codegen.cpp b/clang/test/OpenMP/target_parallel_for_simd_codegen.cpp --- a/clang/test/OpenMP/target_parallel_for_simd_codegen.cpp +++ b/clang/test/OpenMP/target_parallel_for_simd_codegen.cpp @@ -338,13 +338,13 @@ // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr 
[[AA]], align 2 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 // CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]] // CHECK1-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8 @@ -354,23 +354,23 @@ // CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP0]], ptr [[TMP7]]) // CHECK1-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK1-NEXT: store i64 [[CALL]], ptr [[K]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP10]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[K]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[K]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i64 [[TMP12]], ptr [[K_CASTED]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[K_CASTED]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[K_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i64 [[TMP11]], i64 [[TMP13]]) #[[ATTR4:[0-9]+]] // CHECK1-NEXT: store i32 12, ptr [[LIN]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP14:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP14]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP16]], ptr [[LIN_CASTED]], align 4 -// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[LIN_CASTED]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[LIN_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP18]], ptr [[A_CASTED2]], align 4 -// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED2]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED2]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP15]], ptr [[TMP20]], align 8 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -417,13 +417,13 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i64 [[TMP15]], i64 [[TMP17]], i64 [[TMP19]]) 
#[[ATTR4]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP42]], ptr [[A_CASTED3]], align 4 -// CHECK1-NEXT: [[TMP43:%.*]] = load i64, ptr [[A_CASTED3]], align 8 -// CHECK1-NEXT: [[TMP44:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP43:%.*]] = load i64, ptr [[A_CASTED3]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP44:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP44]], ptr [[AA_CASTED4]], align 2 -// CHECK1-NEXT: [[TMP45:%.*]] = load i64, ptr [[AA_CASTED4]], align 8 -// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP45:%.*]] = load i64, ptr [[AA_CASTED4]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP46]], 10 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -472,15 +472,15 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP43]], i64 [[TMP45]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP66:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP66:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP66]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP67:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP67:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP67]], ptr [[A_CASTED11]], align 4 -// CHECK1-NEXT: [[TMP68:%.*]] = load i64, ptr [[A_CASTED11]], align 8 -// CHECK1-NEXT: [[TMP69:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP68:%.*]] = load i64, ptr [[A_CASTED11]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP69:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP69]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK1-NEXT: [[TMP70:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK1-NEXT: [[TMP71:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP70:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP71:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP71]], 20 // CHECK1-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE20:%.*]] // CHECK1: omp_if.then13: @@ -586,7 +586,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP68]], ptr [[B]], i64 [[TMP2]], ptr [[VLA]], ptr [[C]], i64 5, i64 [[TMP5]], ptr [[VLA1]], ptr [[D]], i64 [[TMP70]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END21]] // CHECK1: omp_if.end21: -// CHECK1-NEXT: [[TMP121:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP121:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP122:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP122]]) // CHECK1-NEXT: ret i32 [[TMP121]] @@ -618,45 +618,45 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load 
ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // 
CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK1-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -680,40 +680,40 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !25 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !25 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 -// CHECK1-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !25 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !26 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !26 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !26 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !26 +// CHECK1-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !26 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK1-NEXT: store i32 0, ptr [[TMP9]], align 4, !noalias !25 +// CHECK1-NEXT: store i32 0, ptr [[TMP9]], align 4, !noalias !26 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK1-NEXT: store ptr null, ptr [[TMP10]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr null, ptr [[TMP10]], align 8, !noalias !26 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8, !noalias !26 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8, !noalias !26 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8, !noalias !26 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8, !noalias !26 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8, !noalias !26 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK1-NEXT: store i64 0, ptr [[TMP16]], align 8, !noalias !25 +// CHECK1-NEXT: store i64 0, ptr [[TMP16]], align 8, !noalias !26 // CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK1-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK1-NEXT: br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] @@ -733,12 +733,12 @@ // CHECK1-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: store i64 [[K]], ptr [[K_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[K_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[K_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i64 [[TMP2]], ptr [[K_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[K_CASTED]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[K_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i64 [[TMP1]], i64 [[TMP3]]) // CHECK1-NEXT: ret void // @@ -763,14 +763,14 @@ // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: store i64 [[K]], ptr [[K_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[K_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[K_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i64 [[TMP0]], ptr [[DOTLINEAR_START]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 8, ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]]) // CHECK1-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 35, i32 0, i32 8, i32 1, i32 1) // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] @@ -779,53 +779,53 @@ // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] -// CHECK1-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK1-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3 // CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64 // 
CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV]] -// CHECK1-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK1-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP27]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK1: omp.dispatch.inc: // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK1: omp.dispatch.end: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: // CHECK1-NEXT: store i32 1, ptr [[I]], align 4 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK1: .omp.final.done: -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK1: .omp.linear.pu: -// CHECK1-NEXT: [[TMP16:%.*]] = load i64, ptr [[K1]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load i64, ptr [[K1]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i64 [[TMP16]], ptr [[K_ADDR]], align 8 // CHECK1-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK1: .omp.linear.pu.done: @@ -844,15 +844,15 @@ // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[LIN]], ptr [[LIN_ADDR]], align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, 
!noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP2]], ptr [[LIN_CASTED]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[LIN_CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[LIN_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..3, i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) // CHECK1-NEXT: ret void // @@ -882,9 +882,9 @@ // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[LIN]], ptr [[LIN_ADDR]], align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTLINEAR_START]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START1]], align 4 // CHECK1-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK1-NEXT: store i64 [[CALL]], ptr [[DOTLINEAR_STEP]], align 8 @@ -893,80 +893,80 @@ // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP3]]) // CHECK1-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP29]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr 
[[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK1-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK1-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK1-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[TMP10]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP29]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL5:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK1-NEXT: [[ADD:%.*]] = add i64 [[CONV]], [[MUL5]] // CHECK1-NEXT: [[CONV6:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK1-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK1-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV7:%.*]] = sext i32 [[TMP13]] to i64 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP29]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL8:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK1-NEXT: [[ADD9:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK1-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32 -// CHECK1-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP29]] +// CHECK1-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV11:%.*]] = sext i16 [[TMP16]] to i32 // CHECK1-NEXT: [[ADD12:%.*]] = add nsw i32 [[CONV11]], 1 // 
CHECK1-NEXT: [[CONV13:%.*]] = trunc i32 [[ADD12]] to i16 -// CHECK1-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP29]] +// CHECK1-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] +// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD14:%.*]] = add i64 [[TMP17]], 1 -// CHECK1-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] +// CHECK1-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK1-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: // CHECK1-NEXT: store i64 400, ptr [[IT]], align 8 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK1: .omp.final.done: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK1-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK1: .omp.linear.pu: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP22]], ptr [[LIN_ADDR]], align 4 -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP23]], ptr [[A_ADDR]], align 4 // CHECK1-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK1: .omp.linear.pu.done: @@ -982,12 +982,12 @@ // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // 
CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..4, i64 [[TMP1]], i64 [[TMP3]]) // CHECK1-NEXT: ret void // @@ -1015,54 +1015,54 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i16 -// CHECK1-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK1-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP33]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK1-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP32]] +// CHECK1-NEXT: store i32 [[ADD2]], ptr 
[[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP9]] to i32 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK1-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 -// CHECK1-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP32]] +// CHECK1-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1098,19 +1098,19 @@ // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = load 
i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..7, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i64 [[TMP11]]) // CHECK1-NEXT: ret void // @@ -1150,112 +1150,112 @@ // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 25, ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK1: omp.dispatch.cond: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, 
!noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]] -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 -// CHECK1-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP36]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK1-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK1-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float -// CHECK1-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK1-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK1-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK1-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float -// CHECK1-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, 
!llvm.access.group [[ACC_GRP36]] // CHECK1-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK1-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 -// CHECK1-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK1-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK1-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP23]] // CHECK1-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i64 3 -// CHECK1-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 -// CHECK1-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK1-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK1-NEXT: store i64 [[ADD20]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store i64 [[ADD20]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK1-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK1-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK1-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 -// CHECK1-NEXT: store i8 [[CONV23]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store i8 [[CONV23]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 -// CHECK1-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] // 
CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK1: omp.dispatch.inc: -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK1-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK1-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK1: omp.dispatch.end: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP10]]) -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK1-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1273,27 +1273,27 @@ // CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK1-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK1-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL5:%.*]] = call noundef 
signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK1-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP8]] // // @@ -1313,20 +1313,20 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1403,9 +1403,9 @@ // CHECK1-NEXT: [[TMP40:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP40]] // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2 +// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP41]] to i32 -// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP42]] // CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) @@ -1430,16 +1430,16 @@ // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK1-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = 
load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i8 [[TMP4]], ptr [[AAA_CASTED]], align 1 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1500,7 +1500,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], ptr [[B]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP32]] // // @@ -1519,13 +1519,13 @@ // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1580,7 +1580,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], ptr [[B]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP27]] // // @@ -1598,13 +1598,13 @@ // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 
+// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..10, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]]) // CHECK1-NEXT: ret void // @@ -1633,68 +1633,68 @@ // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK1-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP5]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38:![0-9]+]] -// CHECK1-NEXT: [[TMP10:%.*]] = load 
i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 // CHECK1-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK1-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP39]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK1-NEXT: store double [[ADD]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: store double [[ADD]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP39]] // CHECK1-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 8, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 8, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 -// CHECK1-NEXT: store double [[INC]], ptr [[A4]], align 8, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: store double [[INC]], ptr [[A4]], align 8, !llvm.access.group [[ACC_GRP39]] // CHECK1-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK1-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i64 [[TMP14]] // CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK1-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP39]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 -// CHECK1-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]] +// CHECK1-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] // CHECK1: 
omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1719,15 +1719,15 @@ // CHECK1-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..13, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -1765,12 +1765,12 @@ // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..16, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -1801,57 +1801,57 @@ // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP41:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP42:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP42]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP42]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] -// CHECK1-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP42]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP42]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK1-NEXT: [[TMP10:%.*]] = 
load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP42]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK1-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP42]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP42]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK1-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP42]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP42]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP12]], 1 -// CHECK1-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]] +// CHECK1-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP42]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1912,12 +1912,12 @@ // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11:![0-9]+]] // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]] // CHECK3-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8 // CHECK3-NEXT: store i32 [[TMP3]], ptr [[__VLA_EXPR1]], align 4 @@ -1926,20 +1926,20 @@ // CHECK3-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP0]], ptr [[TMP5]]) // CHECK3-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK3-NEXT: store i64 [[CALL]], ptr [[K]], 
align 8 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i32 [[TMP9]], ptr [[K]]) #[[ATTR4:[0-9]+]] // CHECK3-NEXT: store i32 12, ptr [[LIN]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP10]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP12]], ptr [[LIN_CASTED]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[LIN_CASTED]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[LIN_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP14]], ptr [[A_CASTED2]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[A_CASTED2]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[A_CASTED2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP11]], ptr [[TMP16]], align 4 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -1986,13 +1986,13 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i32 [[TMP11]], i32 [[TMP13]], i32 [[TMP15]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK3: omp_offload.cont: -// CHECK3-NEXT: [[TMP38:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP38:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP38]], ptr [[A_CASTED3]], align 4 -// CHECK3-NEXT: [[TMP39:%.*]] = load i32, ptr [[A_CASTED3]], align 4 -// CHECK3-NEXT: [[TMP40:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP39:%.*]] = load i32, ptr [[A_CASTED3]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP40:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP40]], ptr [[AA_CASTED4]], align 2 -// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[AA_CASTED4]], align 4 -// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[AA_CASTED4]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP42]], 10 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -2041,15 +2041,15 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP39]], i32 [[TMP41]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP62:%.*]] = load i32, ptr [[A]], align 4 +// 
CHECK3-NEXT: [[TMP62:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP62]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP63:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP63:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP63]], ptr [[A_CASTED11]], align 4 -// CHECK3-NEXT: [[TMP64:%.*]] = load i32, ptr [[A_CASTED11]], align 4 -// CHECK3-NEXT: [[TMP65:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP64:%.*]] = load i32, ptr [[A_CASTED11]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP65:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP65]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK3-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK3-NEXT: [[TMP67:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP67:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP67]], 20 // CHECK3-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE20:%.*]] // CHECK3: omp_if.then13: @@ -2157,7 +2157,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP64]], ptr [[B]], i32 [[TMP1]], ptr [[VLA]], ptr [[C]], i32 5, i32 [[TMP3]], ptr [[VLA1]], ptr [[D]], i32 [[TMP66]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END21]] // CHECK3: omp_if.end21: -// CHECK3-NEXT: [[TMP119:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP119:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP120:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP120]]) // CHECK3-NEXT: ret i32 [[TMP119]] @@ -2189,45 +2189,45 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: 
omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]] -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK3-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2251,40 +2251,40 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata 
[[META20:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !26 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !26 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !26 -// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !26 -// CHECK3-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !27 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !27 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !27 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !27 +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !27 +// CHECK3-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !27 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK3-NEXT: store i32 0, ptr [[TMP9]], align 4, !noalias !26 +// CHECK3-NEXT: store i32 0, ptr [[TMP9]], align 4, !noalias !27 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK3-NEXT: store ptr null, ptr [[TMP10]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr null, ptr [[TMP10]], align 4, !noalias !27 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK3-NEXT: store ptr null, ptr [[TMP11]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr null, ptr [[TMP11]], align 4, !noalias !27 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4, !noalias !27 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4, !noalias !27 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4, !noalias !26 +// 
CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4, !noalias !27 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4, !noalias !27 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK3-NEXT: store i64 0, ptr [[TMP16]], align 8, !noalias !26 +// CHECK3-NEXT: store i64 0, ptr [[TMP16]], align 8, !noalias !27 // CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK3-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK3-NEXT: br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] @@ -2304,9 +2304,9 @@ // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: store ptr [[K]], ptr [[K_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[K_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i32 [[TMP2]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -2332,14 +2332,14 @@ // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: store ptr [[K]], ptr [[K_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[K_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i64 [[TMP1]], ptr [[DOTLINEAR_START]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 8, ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK3-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 35, i32 0, i32 8, i32 1, i32 1) // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] @@ -2348,53 +2348,53 @@ // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]] -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] -// CHECK3-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP27]] -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK3-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3 // CHECK3-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]] -// CHECK3-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP27]] -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK3-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP28]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK3: omp.dispatch.inc: // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK3: omp.dispatch.end: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // 
CHECK3-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: // CHECK3-NEXT: store i32 1, ptr [[I]], align 4 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK3: .omp.final.done: -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK3-NEXT: br i1 [[TMP16]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK3: .omp.linear.pu: -// CHECK3-NEXT: [[TMP17:%.*]] = load i64, ptr [[K1]], align 8 +// CHECK3-NEXT: [[TMP17:%.*]] = load i64, ptr [[K1]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i64 [[TMP17]], ptr [[TMP0]], align 8 // CHECK3-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK3: .omp.linear.pu.done: @@ -2413,15 +2413,15 @@ // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[LIN]], ptr [[LIN_ADDR]], align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP2]], ptr [[LIN_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[LIN_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[LIN_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..3, i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) // CHECK3-NEXT: ret void // @@ -2451,9 +2451,9 @@ // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[LIN]], ptr [[LIN_ADDR]], align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTLINEAR_START]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START1]], align 4 // CHECK3-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK3-NEXT: store i64 [[CALL]], ptr [[DOTLINEAR_STEP]], align 8 @@ -2462,80 +2462,80 @@ // CHECK3-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP3]]) // CHECK3-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK3-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30:![0-9]+]] -// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK3-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul 
i64 [[TMP9]], 400 // CHECK3-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK3-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP30]] -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK3-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP31]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i32 [[TMP10]] to i64 -// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] -// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL5:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK3-NEXT: [[ADD:%.*]] = add i64 [[CONV]], [[MUL5]] // CHECK3-NEXT: [[CONV6:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK3-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK3-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV7:%.*]] = sext i32 [[TMP13]] to i64 -// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] -// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL8:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK3-NEXT: [[ADD9:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK3-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32 -// CHECK3-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK3-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]] +// CHECK3-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV11:%.*]] = sext i16 [[TMP16]] to i32 // CHECK3-NEXT: [[ADD12:%.*]] = add nsw i32 [[CONV11]], 1 // CHECK3-NEXT: [[CONV13:%.*]] = trunc i32 [[ADD12]] to i16 -// CHECK3-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]] +// CHECK3-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP31]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD14:%.*]] 
= add i64 [[TMP17]], 1 -// CHECK3-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] +// CHECK3-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK3-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: // CHECK3-NEXT: store i64 400, ptr [[IT]], align 8 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK3: .omp.final.done: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK3-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK3: .omp.linear.pu: -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP22]], ptr [[LIN_ADDR]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP23]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK3: .omp.linear.pu.done: @@ -2551,12 +2551,12 @@ // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..4, i32 [[TMP1]], i32 [[TMP3]]) // CHECK3-NEXT: ret void // @@ -2584,54 +2584,54 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]] -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i16 -// CHECK3-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP33]] -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK3-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP34]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK3-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK3-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] +// CHECK3-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group 
[[ACC_GRP34]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV3:%.*]] = sext i16 [[TMP9]] to i32 // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK3-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 -// CHECK3-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] +// CHECK3-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2667,19 +2667,19 @@ // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 
4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..7, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i32 [[TMP11]]) // CHECK3-NEXT: ret void // @@ -2719,112 +2719,112 @@ // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 25, ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK3: omp.dispatch.cond: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: 
[[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]] -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK3-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 -// CHECK3-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP36]] -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP37]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK3-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK3-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float -// CHECK3-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK3-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK3-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK3-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK3-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float -// CHECK3-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK3-NEXT: 
[[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK3-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 -// CHECK3-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP37]] // CHECK3-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK3-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP23]] // CHECK3-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i32 3 -// CHECK3-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 -// CHECK3-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP37]] // CHECK3-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK3-NEXT: store i64 [[ADD20]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store i64 [[ADD20]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK3-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK3-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK3-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 -// CHECK3-NEXT: store i8 [[CONV23]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store i8 [[CONV23]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 -// CHECK3-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label 
[[OMP_DISPATCH_INC:%.*]] // CHECK3: omp.dispatch.inc: -// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK3-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK3-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK3: omp.dispatch.end: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP10]]) -// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK3-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2842,27 +2842,27 @@ // CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK3-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK3-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 
4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK3-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP8]] // // @@ -2882,19 +2882,19 @@ // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -2972,9 +2972,9 @@ // CHECK3-NEXT: [[TMP40:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP40]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK3-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2 +// CHECK3-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP41]] to i32 -// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4 +// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP42]] // CHECK3-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) @@ -2999,16 +2999,16 @@ // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK3-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 
[[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i8 [[TMP4]], ptr [[AAA_CASTED]], align 1 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -3069,7 +3069,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], ptr [[B]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP32]] // // @@ -3088,13 +3088,13 @@ // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -3149,7 +3149,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], ptr [[B]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP27]] // // @@ -3167,13 +3167,13 @@ // CHECK3-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef 
[[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..10, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]]) // CHECK3-NEXT: ret void // @@ -3202,68 +3202,68 @@ // CHECK3-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK3-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK3-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 // CHECK3-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP5]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK3-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39:![0-9]+]] -// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group 
[[ACC_GRP40:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] // CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 // CHECK3-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK3-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP39]] -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP40]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK3-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK3-NEXT: store double [[ADD]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: store double [[ADD]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP40]] // CHECK3-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 -// CHECK3-NEXT: store double [[INC]], ptr [[A4]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: store double [[INC]], ptr [[A4]], align 4, !llvm.access.group [[ACC_GRP40]] // CHECK3-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK3-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i32 [[TMP14]] // CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK3-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP40]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 -// CHECK3-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] +// CHECK3-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP40]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], 
i32 [[TMP5]]) -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK3-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -3288,15 +3288,15 @@ // CHECK3-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..13, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -3334,12 +3334,12 @@ // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..16, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -3370,57 +3370,57 @@ // CHECK3-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK3-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP42:![0-9]+]] -// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP43:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] -// CHECK3-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP42]] -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP43]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP42]] -// CHECK3-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP43]] +// CHECK3-NEXT: [[TMP10:%.*]] = 
load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK3-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP43]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP43]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP12]], 1 -// CHECK3-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP42]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]] +// CHECK3-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP43]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK3-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -3482,13 +3482,13 @@ // CHECK5-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK5-NEXT: store i32 0, ptr [[A]], align 4 // CHECK5-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10:![0-9]+]] // CHECK5-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK5-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK5-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK5-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK5-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 // CHECK5-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]] // CHECK5-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8 @@ -3498,23 +3498,23 @@ // CHECK5-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP0]], ptr [[TMP7]]) // CHECK5-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() 
// CHECK5-NEXT: store i64 [[CALL]], ptr [[K]], align 8 -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP10]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP11:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[K]], align 8 +// CHECK5-NEXT: [[TMP11:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[K]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i64 [[TMP12]], ptr [[K_CASTED]], align 8 -// CHECK5-NEXT: [[TMP13:%.*]] = load i64, ptr [[K_CASTED]], align 8 +// CHECK5-NEXT: [[TMP13:%.*]] = load i64, ptr [[K_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i64 [[TMP11]], i64 [[TMP13]]) #[[ATTR4:[0-9]+]] // CHECK5-NEXT: store i32 12, ptr [[LIN]], align 4 -// CHECK5-NEXT: [[TMP14:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK5-NEXT: [[TMP14:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP14]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP15:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK5-NEXT: [[TMP15:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP16]], ptr [[LIN_CASTED]], align 4 -// CHECK5-NEXT: [[TMP17:%.*]] = load i64, ptr [[LIN_CASTED]], align 8 -// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP17:%.*]] = load i64, ptr [[LIN_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP18]], ptr [[A_CASTED2]], align 4 -// CHECK5-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED2]], align 8 +// CHECK5-NEXT: [[TMP19:%.*]] = load i64, ptr [[A_CASTED2]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK5-NEXT: store i64 [[TMP15]], ptr [[TMP20]], align 8 // CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -3561,13 +3561,13 @@ // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i64 [[TMP15]], i64 [[TMP17]], i64 [[TMP19]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK5: omp_offload.cont: -// CHECK5-NEXT: [[TMP42:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP42:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP42]], ptr [[A_CASTED3]], align 4 -// CHECK5-NEXT: [[TMP43:%.*]] = load i64, ptr [[A_CASTED3]], align 8 -// CHECK5-NEXT: [[TMP44:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK5-NEXT: [[TMP43:%.*]] = load i64, ptr [[A_CASTED3]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP44:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP44]], ptr [[AA_CASTED4]], align 2 -// CHECK5-NEXT: [[TMP45:%.*]] = load i64, ptr [[AA_CASTED4]], align 8 -// CHECK5-NEXT: [[TMP46:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP45:%.*]] = load i64, ptr [[AA_CASTED4]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP46:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // 
CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP46]], 10 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK5: omp_if.then: @@ -3616,15 +3616,15 @@ // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP43]], i64 [[TMP45]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_IF_END]] // CHECK5: omp_if.end: -// CHECK5-NEXT: [[TMP66:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP66:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP66]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK5-NEXT: [[TMP67:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP67:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP67]], ptr [[A_CASTED11]], align 4 -// CHECK5-NEXT: [[TMP68:%.*]] = load i64, ptr [[A_CASTED11]], align 8 -// CHECK5-NEXT: [[TMP69:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK5-NEXT: [[TMP68:%.*]] = load i64, ptr [[A_CASTED11]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP69:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP69]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK5-NEXT: [[TMP70:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK5-NEXT: [[TMP71:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP70:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP71:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP71]], 20 // CHECK5-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE20:%.*]] // CHECK5: omp_if.then13: @@ -3730,7 +3730,7 @@ // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP68]], ptr [[B]], i64 [[TMP2]], ptr [[VLA]], ptr [[C]], i64 5, i64 [[TMP5]], ptr [[VLA1]], ptr [[D]], i64 [[TMP70]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_IF_END21]] // CHECK5: omp_if.end21: -// CHECK5-NEXT: [[TMP121:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP121:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP122:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK5-NEXT: call void @llvm.stackrestore(ptr [[TMP122]]) // CHECK5-NEXT: ret i32 [[TMP121]] @@ -3762,45 +3762,45 @@ // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // 
CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]] -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK5-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -3824,40 +3824,40 @@ // CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK5-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK5-NEXT: 
[[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25 -// CHECK5-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !25 -// CHECK5-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25 -// CHECK5-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25 -// CHECK5-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !25 -// CHECK5-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 -// CHECK5-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 -// CHECK5-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !25 +// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26 +// CHECK5-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !26 +// CHECK5-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26 +// CHECK5-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26 +// CHECK5-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !26 +// CHECK5-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !26 +// CHECK5-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !26 +// CHECK5-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !26 // CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK5-NEXT: store i32 0, ptr [[TMP9]], align 4, !noalias !25 +// CHECK5-NEXT: store i32 0, ptr [[TMP9]], align 4, !noalias !26 // CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK5-NEXT: store ptr null, ptr [[TMP10]], align 8, !noalias !25 +// CHECK5-NEXT: store ptr null, ptr [[TMP10]], align 8, !noalias !26 // CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK5-NEXT: store ptr null, ptr [[TMP11]], align 8, !noalias !25 +// CHECK5-NEXT: store ptr null, ptr [[TMP11]], align 8, !noalias !26 // CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK5-NEXT: store ptr null, ptr [[TMP12]], align 8, !noalias !25 +// CHECK5-NEXT: store ptr null, ptr [[TMP12]], align 8, !noalias !26 // CHECK5-NEXT: [[TMP13:%.*]] = 
getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK5-NEXT: store ptr null, ptr [[TMP13]], align 8, !noalias !25 +// CHECK5-NEXT: store ptr null, ptr [[TMP13]], align 8, !noalias !26 // CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP14]], align 8, !noalias !25 +// CHECK5-NEXT: store ptr null, ptr [[TMP14]], align 8, !noalias !26 // CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP15]], align 8, !noalias !25 +// CHECK5-NEXT: store ptr null, ptr [[TMP15]], align 8, !noalias !26 // CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK5-NEXT: store i64 0, ptr [[TMP16]], align 8, !noalias !25 +// CHECK5-NEXT: store i64 0, ptr [[TMP16]], align 8, !noalias !26 // CHECK5-NEXT: [[TMP17:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK5-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK5-NEXT: br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] @@ -3877,12 +3877,12 @@ // CHECK5-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8 // CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK5-NEXT: store i64 [[K]], ptr [[K_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[K_ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[K_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i64 [[TMP2]], ptr [[K_CASTED]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[K_CASTED]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[K_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i64 [[TMP1]], i64 [[TMP3]]) // CHECK5-NEXT: ret void // @@ -3907,14 +3907,14 @@ // CHECK5-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK5-NEXT: store i64 [[K]], ptr [[K_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[K_ADDR]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = load i64, ptr [[K_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i64 [[TMP0]], ptr [[DOTLINEAR_START]], align 8 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 8, ptr [[DOTOMP_UB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]]) // CHECK5-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 1073741859, i32 0, i32 8, i32 1, i32 1) // CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]] @@ -3923,53 +3923,53 @@ // CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0 // CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK5: omp.dispatch.body: -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]] -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] -// CHECK5-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP26]] -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK5-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3 // CHECK5-NEXT: [[CONV:%.*]] = sext i32 
[[MUL2]] to i64 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV]] -// CHECK5-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP26]] -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK5-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP27]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK5-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK5: omp.dispatch.inc: // CHECK5-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK5: omp.dispatch.end: -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK5-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: // CHECK5-NEXT: store i32 1, ptr [[I]], align 4 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK5: .omp.final.done: -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK5-NEXT: br i1 [[TMP15]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK5: .omp.linear.pu: -// CHECK5-NEXT: [[TMP16:%.*]] = load i64, ptr [[K1]], align 8 +// CHECK5-NEXT: [[TMP16:%.*]] = load i64, ptr [[K1]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i64 [[TMP16]], ptr [[K_ADDR]], align 8 // CHECK5-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK5: .omp.linear.pu.done: @@ -3988,15 +3988,15 @@ // CHECK5-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK5-NEXT: store i64 [[LIN]], ptr [[LIN_ADDR]], align 8 // CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK5-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr 
[[LIN_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP2]], ptr [[LIN_CASTED]], align 4 -// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[LIN_CASTED]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[LIN_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..3, i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) // CHECK5-NEXT: ret void // @@ -4026,9 +4026,9 @@ // CHECK5-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK5-NEXT: store i64 [[LIN]], ptr [[LIN_ADDR]], align 8 // CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTLINEAR_START]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START1]], align 4 // CHECK5-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK5-NEXT: store i64 [[CALL]], ptr [[DOTLINEAR_STEP]], align 8 @@ -4037,80 +4037,80 @@ // CHECK5-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP3]]) // CHECK5-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK5-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK5-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29:![0-9]+]] -// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP29]] +// CHECK5-NEXT: 
[[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK5-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK5-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK5-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP29]] -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK5-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV:%.*]] = sext i32 [[TMP10]] to i64 -// CHECK5-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] -// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP29]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL5:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK5-NEXT: [[ADD:%.*]] = add i64 [[CONV]], [[MUL5]] // CHECK5-NEXT: [[CONV6:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK5-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK5-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV7:%.*]] = sext i32 [[TMP13]] to i64 -// CHECK5-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] -// CHECK5-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP29]] +// CHECK5-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL8:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK5-NEXT: [[ADD9:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK5-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32 -// CHECK5-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK5-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP29]] +// CHECK5-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK5-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV11:%.*]] = sext i16 [[TMP16]] to i32 // CHECK5-NEXT: [[ADD12:%.*]] = add 
nsw i32 [[CONV11]], 1 // CHECK5-NEXT: [[CONV13:%.*]] = trunc i32 [[ADD12]] to i16 -// CHECK5-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP29]] +// CHECK5-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] +// CHECK5-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD14:%.*]] = add i64 [[TMP17]], 1 -// CHECK5-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] +// CHECK5-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK5-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: // CHECK5-NEXT: store i64 400, ptr [[IT]], align 8 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK5: .omp.final.done: -// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK5-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK5: .omp.linear.pu: -// CHECK5-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4 +// CHECK5-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP22]], ptr [[LIN_ADDR]], align 4 -// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP23]], ptr [[A_ADDR]], align 4 // CHECK5-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK5: .omp.linear.pu.done: @@ -4126,12 +4126,12 @@ // CHECK5-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK5-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, 
!noundef [[NOUNDEF10]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..4, i64 [[TMP1]], i64 [[TMP3]]) // CHECK5-NEXT: ret void // @@ -4159,54 +4159,54 @@ // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]] -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK5-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i16 -// CHECK5-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP32]] -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK5-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP33]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK5-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK5-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP32]] +// 
CHECK5-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV3:%.*]] = sext i16 [[TMP9]] to i32 // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK5-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 -// CHECK5-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP32]] +// CHECK5-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK5-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -4242,19 +4242,19 @@ // CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK5-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// 
CHECK5-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK5-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..7, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i64 [[TMP11]]) // CHECK5-NEXT: ret void // @@ -4294,112 +4294,112 @@ // CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK5-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 25, ptr [[DOTOMP_UB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK5: omp.dispatch.cond: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP14:%.*]] = load 
i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK5-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK5: omp.dispatch.body: // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]] -// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK5-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK5-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 -// CHECK5-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP35]] -// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK5-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK5-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK5-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK5-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float -// CHECK5-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK5-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK5-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK5-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK5-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float -// CHECK5-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: store float [[CONV13]], ptr 
[[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK5-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK5-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i64 0, i64 2 -// CHECK5-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 -// CHECK5-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK5-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK5-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP23]] // CHECK5-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i64 3 -// CHECK5-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 -// CHECK5-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK5-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK5-NEXT: store i64 [[ADD20]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: store i64 [[ADD20]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK5-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK5-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK5-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK5-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 -// CHECK5-NEXT: store i8 [[CONV23]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: store i8 [[CONV23]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 -// CHECK5-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP37:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK5: omp.dispatch.inc: -// CHECK5-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK5-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK5-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK5: omp.dispatch.end: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP10]]) -// CHECK5-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK5-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -4417,27 +4417,27 @@ // CHECK5-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK5-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK5-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK5-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK5-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK5-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: 
[[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK5-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: ret i32 [[TMP8]] // // @@ -4459,29 +4459,29 @@ // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK5-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK5-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK5-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK5-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK5-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 // CHECK5-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60 // CHECK5-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 // CHECK5-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[B]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP6]], ptr [[B_CASTED]], align 4 -// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[B_CASTED]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1 // CHECK5-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK5-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK5-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK5-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK5-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP10]] to i1 // CHECK5-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK5: omp_if.then: @@ -4530,7 +4530,7 @@ // CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds [6 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK5-NEXT: [[TMP34:%.*]] = getelementptr inbounds [6 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP35:%.*]] = load i8, ptr 
[[DOTCAPTURE_EXPR_]], align 1 +// CHECK5-NEXT: [[TMP35:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP35]] to i1 // CHECK5-NEXT: [[TMP36:%.*]] = select i1 [[TOBOOL4]], i32 0, i32 1 // CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -4567,9 +4567,9 @@ // CHECK5-NEXT: [[TMP48:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP48]] // CHECK5-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK5-NEXT: [[TMP49:%.*]] = load i16, ptr [[ARRAYIDX5]], align 2 +// CHECK5-NEXT: [[TMP49:%.*]] = load i16, ptr [[ARRAYIDX5]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV:%.*]] = sext i16 [[TMP49]] to i32 -// CHECK5-NEXT: [[TMP50:%.*]] = load i32, ptr [[B]], align 4 +// CHECK5-NEXT: [[TMP50:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], [[TMP50]] // CHECK5-NEXT: [[TMP51:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK5-NEXT: call void @llvm.stackrestore(ptr [[TMP51]]) @@ -4594,16 +4594,16 @@ // CHECK5-NEXT: store i32 0, ptr [[A]], align 4 // CHECK5-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK5-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i8 [[TMP4]], ptr [[AAA_CASTED]], align 1 -// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK5: omp_if.then: @@ -4664,7 +4664,7 @@ // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], ptr [[B]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_IF_END]] // CHECK5: omp_if.end: -// CHECK5-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: ret i32 [[TMP32]] // // @@ -4683,13 +4683,13 @@ // CHECK5-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK5-NEXT: store i32 0, ptr [[A]], align 4 // CHECK5-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 
[[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK5: omp_if.then: @@ -4744,7 +4744,7 @@ // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], ptr [[B]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_IF_END]] // CHECK5: omp_if.end: -// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: ret i32 [[TMP27]] // // @@ -4768,19 +4768,19 @@ // CHECK5-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK5-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8 -// CHECK5-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK5-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 // CHECK5-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK5-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK5-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK5: omp_if.then: @@ -4823,104 +4823,104 @@ // CHECK5-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK5-NEXT: store 
i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK5-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK5-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 // CHECK5-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK5-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1 // CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK5: omp_if.then: // CHECK5-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP6]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK5-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK5-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i64 [[TMP9]], ptr [[DOTOMP_IV]], align 8 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38:![0-9]+]] -// CHECK5-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP38]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]] // CHECK5-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38]] +// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group 
[[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul i64 [[TMP12]], 400 // CHECK5-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK5-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP38]] -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP38]] +// CHECK5-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP39]] +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP13]] to double // CHECK5-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK5-NEXT: store double [[ADD]], ptr [[A]], align 8, !nontemporal !39, !llvm.access.group [[ACC_GRP38]] +// CHECK5-NEXT: store double [[ADD]], ptr [[A]], align 8, !nontemporal !40, !llvm.access.group [[ACC_GRP39]] // CHECK5-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP14:%.*]] = load double, ptr [[A4]], align 8, !nontemporal !39, !llvm.access.group [[ACC_GRP38]] +// CHECK5-NEXT: [[TMP14:%.*]] = load double, ptr [[A4]], align 8, !nontemporal !40, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00 -// CHECK5-NEXT: store double [[INC]], ptr [[A4]], align 8, !nontemporal !39, !llvm.access.group [[ACC_GRP38]] +// CHECK5-NEXT: store double [[INC]], ptr [[A4]], align 8, !nontemporal !40, !llvm.access.group [[ACC_GRP39]] // CHECK5-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK5-NEXT: [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i64 [[TMP15]] // CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK5-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP38]] +// CHECK5-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP39]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38]] +// CHECK5-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD7:%.*]] = add i64 [[TMP16]], 1 -// CHECK5-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] +// CHECK5-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_IF_END:%.*]] // CHECK5: omp_if.else: // CHECK5-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 +// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP18]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK5-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK5-NEXT: 
[[TMP19:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP8:%.*]] = icmp ugt i64 [[TMP19]], 3 // CHECK5-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]] // CHECK5: cond.true9: // CHECK5-NEXT: br label [[COND_END11:%.*]] // CHECK5: cond.false10: -// CHECK5-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK5-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END11]] // CHECK5: cond.end11: // CHECK5-NEXT: [[COND12:%.*]] = phi i64 [ 3, [[COND_TRUE9]] ], [ [[TMP20]], [[COND_FALSE10]] ] // CHECK5-NEXT: store i64 [[COND12]], ptr [[DOTOMP_UB]], align 8 -// CHECK5-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK5-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i64 [[TMP21]], ptr [[DOTOMP_IV]], align 8 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]] // CHECK5: omp.inner.for.cond13: -// CHECK5-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK5-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK5-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP14:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] // CHECK5-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END29:%.*]] // CHECK5: omp.inner.for.body15: -// CHECK5-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK5-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL16:%.*]] = mul i64 [[TMP24]], 400 // CHECK5-NEXT: [[SUB17:%.*]] = sub i64 2000, [[MUL16]] // CHECK5-NEXT: store i64 [[SUB17]], ptr [[IT]], align 8 -// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV18:%.*]] = sitofp i32 [[TMP25]] to double // CHECK5-NEXT: [[ADD19:%.*]] = fadd double [[CONV18]], 1.500000e+00 // CHECK5-NEXT: [[A20:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 // CHECK5-NEXT: store double [[ADD19]], ptr [[A20]], align 8 // CHECK5-NEXT: [[A21:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP26:%.*]] = load double, ptr [[A21]], align 8 +// CHECK5-NEXT: [[TMP26:%.*]] = load double, ptr [[A21]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[INC22:%.*]] = fadd double [[TMP26]], 1.000000e+00 // CHECK5-NEXT: store double [[INC22]], ptr [[A21]], align 8 // CHECK5-NEXT: [[CONV23:%.*]] = fptosi double [[INC22]] to i16 @@ -4932,19 +4932,19 @@ // CHECK5: omp.body.continue26: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] // CHECK5: omp.inner.for.inc27: -// CHECK5-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK5-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD28:%.*]] = add i64 [[TMP28]], 1 // CHECK5-NEXT: store i64 [[ADD28]], ptr [[DOTOMP_IV]], align 8 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP42:![0-9]+]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP43:![0-9]+]] // CHECK5: omp.inner.for.end29: // CHECK5-NEXT: br label [[OMP_IF_END]] // CHECK5: omp_if.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: [[TMP29:%.*]] = 
load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4 +// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]]) -// CHECK5-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 // CHECK5-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -4969,15 +4969,15 @@ // CHECK5-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK5-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK5-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..13, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], ptr [[TMP0]]) // CHECK5-NEXT: ret void // @@ -5015,12 +5015,12 @@ // CHECK5-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..16, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK5-NEXT: ret void // @@ -5051,57 +5051,57 @@ // CHECK5-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK5-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP44:![0-9]+]] -// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP45:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] -// CHECK5-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP44]] -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP45]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK5-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK5-NEXT: [[TMP10:%.*]] = 
load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK5-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK5-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP45]] // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK5-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP45]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP12]], 1 -// CHECK5-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP44]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]] +// CHECK5-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP45]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK5-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -5162,12 +5162,12 @@ // CHECK7-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK7-NEXT: store i32 0, ptr [[A]], align 4 // CHECK7-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11:![0-9]+]] // CHECK7-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK7-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK7-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK7-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]] // CHECK7-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8 // CHECK7-NEXT: store i32 [[TMP3]], ptr [[__VLA_EXPR1]], align 4 @@ -5176,20 +5176,20 @@ // CHECK7-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP0]], ptr [[TMP5]]) // CHECK7-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK7-NEXT: store i64 [[CALL]], ptr [[K]], 
align 8 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i32 [[TMP9]], ptr [[K]]) #[[ATTR4:[0-9]+]] // CHECK7-NEXT: store i32 12, ptr [[LIN]], align 4 -// CHECK7-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK7-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP10]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP12]], ptr [[LIN_CASTED]], align 4 -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[LIN_CASTED]], align 4 -// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[LIN_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP14]], ptr [[A_CASTED2]], align 4 -// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[A_CASTED2]], align 4 +// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[A_CASTED2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: store i32 [[TMP11]], ptr [[TMP16]], align 4 // CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -5236,13 +5236,13 @@ // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i32 [[TMP11]], i32 [[TMP13]], i32 [[TMP15]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK7: omp_offload.cont: -// CHECK7-NEXT: [[TMP38:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP38:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP38]], ptr [[A_CASTED3]], align 4 -// CHECK7-NEXT: [[TMP39:%.*]] = load i32, ptr [[A_CASTED3]], align 4 -// CHECK7-NEXT: [[TMP40:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK7-NEXT: [[TMP39:%.*]] = load i32, ptr [[A_CASTED3]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP40:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP40]], ptr [[AA_CASTED4]], align 2 -// CHECK7-NEXT: [[TMP41:%.*]] = load i32, ptr [[AA_CASTED4]], align 4 -// CHECK7-NEXT: [[TMP42:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP41:%.*]] = load i32, ptr [[AA_CASTED4]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP42:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP42]], 10 // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: @@ -5291,15 +5291,15 @@ // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP39]], i32 [[TMP41]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: -// CHECK7-NEXT: [[TMP62:%.*]] = load i32, ptr [[A]], align 4 +// 
CHECK7-NEXT: [[TMP62:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP62]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK7-NEXT: [[TMP63:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP63:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP63]], ptr [[A_CASTED11]], align 4 -// CHECK7-NEXT: [[TMP64:%.*]] = load i32, ptr [[A_CASTED11]], align 4 -// CHECK7-NEXT: [[TMP65:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK7-NEXT: [[TMP64:%.*]] = load i32, ptr [[A_CASTED11]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP65:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP65]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK7-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK7-NEXT: [[TMP67:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP66:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP67:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP67]], 20 // CHECK7-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE20:%.*]] // CHECK7: omp_if.then13: @@ -5407,7 +5407,7 @@ // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP64]], ptr [[B]], i32 [[TMP1]], ptr [[VLA]], ptr [[C]], i32 5, i32 [[TMP3]], ptr [[VLA1]], ptr [[D]], i32 [[TMP66]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END21]] // CHECK7: omp_if.end21: -// CHECK7-NEXT: [[TMP119:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP119:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP120:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK7-NEXT: call void @llvm.stackrestore(ptr [[TMP120]]) // CHECK7-NEXT: ret i32 [[TMP119]] @@ -5439,45 +5439,45 @@ // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: 
omp.inner.for.cond: -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]] -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK7-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -5501,40 +5501,40 @@ // CHECK7-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4 // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK7-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) -// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata 
[[META20:![0-9]+]]) -// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) -// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) -// CHECK7-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26 -// CHECK7-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !26 -// CHECK7-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26 -// CHECK7-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26 -// CHECK7-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !26 -// CHECK7-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !26 -// CHECK7-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !26 -// CHECK7-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !26 +// CHECK7-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) +// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) +// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]]) +// CHECK7-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !27 +// CHECK7-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !27 +// CHECK7-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27 +// CHECK7-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27 +// CHECK7-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !27 +// CHECK7-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !27 +// CHECK7-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !27 +// CHECK7-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !27 // CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK7-NEXT: store i32 0, ptr [[TMP9]], align 4, !noalias !26 +// CHECK7-NEXT: store i32 0, ptr [[TMP9]], align 4, !noalias !27 // CHECK7-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK7-NEXT: store ptr null, ptr [[TMP10]], align 4, !noalias !26 +// CHECK7-NEXT: store ptr null, ptr [[TMP10]], align 4, !noalias !27 // CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK7-NEXT: store ptr null, ptr [[TMP11]], align 4, !noalias !26 +// CHECK7-NEXT: store ptr null, ptr [[TMP11]], align 4, !noalias !27 // CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK7-NEXT: store ptr null, ptr [[TMP12]], align 4, !noalias !26 +// CHECK7-NEXT: store ptr null, ptr [[TMP12]], align 4, !noalias !27 // CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK7-NEXT: store ptr null, ptr [[TMP13]], align 4, !noalias !26 +// CHECK7-NEXT: store ptr null, ptr [[TMP13]], align 4, !noalias !27 // CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK7-NEXT: store ptr null, ptr [[TMP14]], align 4, !noalias !26 +// 
CHECK7-NEXT: store ptr null, ptr [[TMP14]], align 4, !noalias !27 // CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK7-NEXT: store ptr null, ptr [[TMP15]], align 4, !noalias !26 +// CHECK7-NEXT: store ptr null, ptr [[TMP15]], align 4, !noalias !27 // CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK7-NEXT: store i64 0, ptr [[TMP16]], align 8, !noalias !26 +// CHECK7-NEXT: store i64 0, ptr [[TMP16]], align 8, !noalias !27 // CHECK7-NEXT: [[TMP17:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB2]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK7-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 // CHECK7-NEXT: br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] @@ -5554,9 +5554,9 @@ // CHECK7-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK7-NEXT: store ptr [[K]], ptr [[K_ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[K_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i32 [[TMP2]], ptr [[TMP0]]) // CHECK7-NEXT: ret void // @@ -5582,14 +5582,14 @@ // CHECK7-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK7-NEXT: store ptr [[K]], ptr [[K_ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[K_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 8 +// CHECK7-NEXT: [[TMP1:%.*]] = load i64, ptr [[TMP0]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i64 [[TMP1]], ptr [[DOTLINEAR_START]], align 8 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 8, ptr [[DOTOMP_UB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK7-NEXT: call void @__kmpc_dispatch_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 1073741859, i32 0, i32 8, i32 1, i32 1) // CHECK7-NEXT: br label [[OMP_DISPATCH_COND:%.*]] @@ -5598,53 +5598,53 @@ // CHECK7-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0 // CHECK7-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK7: omp.dispatch.body: -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]] -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] -// CHECK7-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK7-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP27]] -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK7-NEXT: store i32 [[SUB]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3 // CHECK7-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]] -// CHECK7-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP27]] -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK7-NEXT: store i64 [[ADD]], ptr [[K1]], align 8, !llvm.access.group [[ACC_GRP28]] +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK7-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK7-NEXT: store i32 [[ADD3]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK7-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK7: omp.dispatch.inc: // CHECK7-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK7: omp.dispatch.end: -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // 
CHECK7-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: // CHECK7-NEXT: store i32 1, ptr [[I]], align 4 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK7: .omp.final.done: -// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK7-NEXT: br i1 [[TMP16]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK7: .omp.linear.pu: -// CHECK7-NEXT: [[TMP17:%.*]] = load i64, ptr [[K1]], align 8 +// CHECK7-NEXT: [[TMP17:%.*]] = load i64, ptr [[K1]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i64 [[TMP17]], ptr [[TMP0]], align 8 // CHECK7-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK7: .omp.linear.pu.done: @@ -5663,15 +5663,15 @@ // CHECK7-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK7-NEXT: store i32 [[LIN]], ptr [[LIN_ADDR]], align 4 // CHECK7-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK7-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP2]], ptr [[LIN_CASTED]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[LIN_CASTED]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[LIN_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..3, i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) // CHECK7-NEXT: ret void // @@ -5701,9 +5701,9 @@ // CHECK7-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK7-NEXT: store i32 [[LIN]], ptr [[LIN_ADDR]], align 4 // CHECK7-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTLINEAR_START]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START1]], align 4 // CHECK7-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK7-NEXT: store i64 [[CALL]], ptr [[DOTLINEAR_STEP]], align 8 @@ -5712,80 +5712,80 @@ // CHECK7-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3]], i32 [[TMP3]]) // CHECK7-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK7-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK7-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK7-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK7-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK7-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK7-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30:![0-9]+]] -// CHECK7-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK7-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul 
i64 [[TMP9]], 400 // CHECK7-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK7-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP30]] -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK7-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP31]] +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV:%.*]] = sext i32 [[TMP10]] to i64 -// CHECK7-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] -// CHECK7-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK7-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL5:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK7-NEXT: [[ADD:%.*]] = add i64 [[CONV]], [[MUL5]] // CHECK7-NEXT: [[CONV6:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK7-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK7-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV7:%.*]] = sext i32 [[TMP13]] to i64 -// CHECK7-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] -// CHECK7-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK7-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL8:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK7-NEXT: [[ADD9:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK7-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32 -// CHECK7-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK7-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]] +// CHECK7-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK7-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV11:%.*]] = sext i16 [[TMP16]] to i32 // CHECK7-NEXT: [[ADD12:%.*]] = add nsw i32 [[CONV11]], 1 // CHECK7-NEXT: [[CONV13:%.*]] = trunc i32 [[ADD12]] to i16 -// CHECK7-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]] +// CHECK7-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP31]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK7-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD14:%.*]] 
= add i64 [[TMP17]], 1 -// CHECK7-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] +// CHECK7-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK7-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: // CHECK7-NEXT: store i64 400, ptr [[IT]], align 8 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK7: .omp.final.done: -// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK7-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK7: .omp.linear.pu: -// CHECK7-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4 +// CHECK7-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP22]], ptr [[LIN_ADDR]], align 4 -// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP23]], ptr [[A_ADDR]], align 4 // CHECK7-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK7: .omp.linear.pu.done: @@ -5801,12 +5801,12 @@ // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..4, i32 [[TMP1]], i32 [[TMP3]]) // CHECK7-NEXT: ret void // @@ -5834,54 +5834,54 @@ // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]] -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK7-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i16 -// CHECK7-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP33]] -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK7-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP34]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK7-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group 
[[ACC_GRP34]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV3:%.*]] = sext i16 [[TMP9]] to i32 // CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK7-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 -// CHECK7-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] +// CHECK7-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK7-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK7-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -5917,19 +5917,19 @@ // CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK7-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 
4 +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..7, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i32 [[TMP11]]) // CHECK7-NEXT: ret void // @@ -5969,112 +5969,112 @@ // CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK7-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 25, ptr [[DOTOMP_UB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK7-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK7: omp.dispatch.cond: -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: 
[[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK7-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK7: omp.dispatch.body: // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]] -// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK7-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK7-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 -// CHECK7-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP36]] -// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK7-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK7-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK7-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK7-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float -// CHECK7-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK7-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK7-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK7-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK7-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float -// CHECK7-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK7-NEXT: 
[[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK7-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i32 0, i32 2 -// CHECK7-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 -// CHECK7-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP37]] // CHECK7-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK7-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP23]] // CHECK7-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i32 3 -// CHECK7-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 -// CHECK7-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP37]] // CHECK7-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK7-NEXT: store i64 [[ADD20]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: store i64 [[ADD20]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK7-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK7-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK7-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK7-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 -// CHECK7-NEXT: store i8 [[CONV23]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: store i8 [[CONV23]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 -// CHECK7-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label 
[[OMP_DISPATCH_INC:%.*]] // CHECK7: omp.dispatch.inc: -// CHECK7-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK7-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK7-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK7-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK7-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK7-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK7-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK7-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK7: omp.dispatch.end: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP10]]) -// CHECK7-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK7-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -6092,27 +6092,27 @@ // CHECK7-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK7-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK7-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK7-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK7-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK7-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 
4 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK7-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: ret i32 [[TMP8]] // // @@ -6134,28 +6134,28 @@ // CHECK7-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK7-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK7-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK7-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK7-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK7-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 // CHECK7-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60 // CHECK7-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 // CHECK7-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[B_CASTED]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 // CHECK7-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK7-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK7-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: @@ -6205,7 +6205,7 @@ // CHECK7-NEXT: [[TMP32:%.*]] = getelementptr inbounds [6 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP34:%.*]] = getelementptr inbounds [6 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP35:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK7-NEXT: [[TMP35:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP35]] to i1 // CHECK7-NEXT: [[TMP36:%.*]] = 
select i1 [[TOBOOL4]], i32 0, i32 1 // CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -6242,9 +6242,9 @@ // CHECK7-NEXT: [[TMP48:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP48]] // CHECK7-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK7-NEXT: [[TMP49:%.*]] = load i16, ptr [[ARRAYIDX5]], align 2 +// CHECK7-NEXT: [[TMP49:%.*]] = load i16, ptr [[ARRAYIDX5]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV:%.*]] = sext i16 [[TMP49]] to i32 -// CHECK7-NEXT: [[TMP50:%.*]] = load i32, ptr [[B]], align 4 +// CHECK7-NEXT: [[TMP50:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV]], [[TMP50]] // CHECK7-NEXT: [[TMP51:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK7-NEXT: call void @llvm.stackrestore(ptr [[TMP51]]) @@ -6269,16 +6269,16 @@ // CHECK7-NEXT: store i32 0, ptr [[A]], align 4 // CHECK7-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK7-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i8 [[TMP4]], ptr [[AAA_CASTED]], align 1 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: @@ -6339,7 +6339,7 @@ // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], ptr [[B]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: -// CHECK7-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: ret i32 [[TMP32]] // // @@ -6358,13 +6358,13 @@ // CHECK7-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK7-NEXT: store i32 0, ptr [[A]], align 4 // CHECK7-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], 
align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: @@ -6419,7 +6419,7 @@ // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], ptr [[B]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: -// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: ret i32 [[TMP27]] // // @@ -6443,19 +6443,19 @@ // CHECK7-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP4:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[B_CASTED]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 // CHECK7-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK7-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK7-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: @@ -6498,104 +6498,104 @@ // CHECK7-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK7-NEXT: 
[[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK7-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK7-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 // CHECK7-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK7-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1 // CHECK7-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: // CHECK7-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP6]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK7-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK7-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK7-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK7-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK7-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK7-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i64 [[TMP9]], ptr [[DOTOMP_IV]], align 8 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39:![0-9]+]] -// CHECK7-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP39]] +// CHECK7-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP40:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]] // CHECK7-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]] +// CHECK7-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul i64 [[TMP12]], 400 // CHECK7-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK7-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group 
[[ACC_GRP39]] -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK7-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP40]] +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP13]] to double // CHECK7-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK7-NEXT: store double [[ADD]], ptr [[A]], align 4, !nontemporal !40, !llvm.access.group [[ACC_GRP39]] +// CHECK7-NEXT: store double [[ADD]], ptr [[A]], align 4, !nontemporal !41, !llvm.access.group [[ACC_GRP40]] // CHECK7-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP14:%.*]] = load double, ptr [[A4]], align 4, !nontemporal !40, !llvm.access.group [[ACC_GRP39]] +// CHECK7-NEXT: [[TMP14:%.*]] = load double, ptr [[A4]], align 4, !nontemporal !41, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00 -// CHECK7-NEXT: store double [[INC]], ptr [[A4]], align 4, !nontemporal !40, !llvm.access.group [[ACC_GRP39]] +// CHECK7-NEXT: store double [[INC]], ptr [[A4]], align 4, !nontemporal !41, !llvm.access.group [[ACC_GRP40]] // CHECK7-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK7-NEXT: [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i32 [[TMP15]] // CHECK7-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK7-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP39]] +// CHECK7-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP40]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]] +// CHECK7-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD7:%.*]] = add i64 [[TMP16]], 1 -// CHECK7-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]] +// CHECK7-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP40]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] // CHECK7: omp_if.else: // CHECK7-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 +// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP18]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK7-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK7-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP8:%.*]] = icmp ugt i64 [[TMP19]], 3 // CHECK7-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]] // CHECK7: 
cond.true9: // CHECK7-NEXT: br label [[COND_END11:%.*]] // CHECK7: cond.false10: -// CHECK7-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK7-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END11]] // CHECK7: cond.end11: // CHECK7-NEXT: [[COND12:%.*]] = phi i64 [ 3, [[COND_TRUE9]] ], [ [[TMP20]], [[COND_FALSE10]] ] // CHECK7-NEXT: store i64 [[COND12]], ptr [[DOTOMP_UB]], align 8 -// CHECK7-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK7-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i64 [[TMP21]], ptr [[DOTOMP_IV]], align 8 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]] // CHECK7: omp.inner.for.cond13: -// CHECK7-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK7-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK7-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP14:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] // CHECK7-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END29:%.*]] // CHECK7: omp.inner.for.body15: -// CHECK7-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK7-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL16:%.*]] = mul i64 [[TMP24]], 400 // CHECK7-NEXT: [[SUB17:%.*]] = sub i64 2000, [[MUL16]] // CHECK7-NEXT: store i64 [[SUB17]], ptr [[IT]], align 8 -// CHECK7-NEXT: [[TMP25:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK7-NEXT: [[TMP25:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV18:%.*]] = sitofp i32 [[TMP25]] to double // CHECK7-NEXT: [[ADD19:%.*]] = fadd double [[CONV18]], 1.500000e+00 // CHECK7-NEXT: [[A20:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 // CHECK7-NEXT: store double [[ADD19]], ptr [[A20]], align 4 // CHECK7-NEXT: [[A21:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP26:%.*]] = load double, ptr [[A21]], align 4 +// CHECK7-NEXT: [[TMP26:%.*]] = load double, ptr [[A21]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[INC22:%.*]] = fadd double [[TMP26]], 1.000000e+00 // CHECK7-NEXT: store double [[INC22]], ptr [[A21]], align 4 // CHECK7-NEXT: [[CONV23:%.*]] = fptosi double [[INC22]] to i16 @@ -6607,19 +6607,19 @@ // CHECK7: omp.body.continue26: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] // CHECK7: omp.inner.for.inc27: -// CHECK7-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK7-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD28:%.*]] = add i64 [[TMP28]], 1 // CHECK7-NEXT: store i64 [[ADD28]], ptr [[DOTOMP_IV]], align 8 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP43:![0-9]+]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP44:![0-9]+]] // CHECK7: omp.inner.for.end29: // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4 +// CHECK7-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void 
@__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]]) -// CHECK7-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 // CHECK7-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -6644,15 +6644,15 @@ // CHECK7-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..13, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], ptr [[TMP0]]) // CHECK7-NEXT: ret void // @@ -6690,12 +6690,12 @@ // CHECK7-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..16, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK7-NEXT: ret void // @@ -6726,57 +6726,57 @@ // CHECK7-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK7-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK7-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK7-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK7-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK7-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK7-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP45:![0-9]+]] -// CHECK7-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP46:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] -// CHECK7-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP45]] -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP46]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP45]] -// CHECK7-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK7-NEXT: [[TMP10:%.*]] = 
load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK7-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK7-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP46]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK7-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP46]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP12]], 1 -// CHECK7-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP45]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]] +// CHECK7-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP46]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK7-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -6849,238 +6849,238 @@ // CHECK9-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK9-NEXT: store i32 0, ptr [[A]], align 4 // CHECK9-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64 // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]] // CHECK9-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8 // CHECK9-NEXT: store i64 [[TMP4]], ptr [[__VLA_EXPR1]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 5, ptr [[DOTOMP_UB]], align 4 -// 
CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: store i32 33, ptr [[I]], align 4 // CHECK9-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK9-NEXT: store i64 [[CALL]], ptr [[K]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4 // CHECK9-NEXT: store i32 8, ptr [[DOTOMP_UB5]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV6]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[K]], align 8 +// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[K]], align 8, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i64 [[TMP12]], ptr [[DOTLINEAR_START]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK9: omp.inner.for.cond9: -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]] -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, 
!llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK9-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]] // CHECK9: omp.inner.for.body11: -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]] -// CHECK9-NEXT: store i32 [[SUB]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK9-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP6]] -// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: store i32 [[SUB]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK9-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3 // CHECK9-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64 // CHECK9-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]] -// CHECK9-NEXT: store i64 [[ADD14]], ptr [[K8]], align 8, !llvm.access.group [[ACC_GRP6]] -// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: store i64 [[ADD14]], ptr [[K8]], align 8, !llvm.access.group [[ACC_GRP7]] +// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK9-NEXT: store i32 [[ADD15]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: store i32 [[ADD15]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] // CHECK9: omp.body.continue16: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK9: omp.inner.for.inc17: -// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK9-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK9: omp.inner.for.end19: // CHECK9-NEXT: store i32 1, ptr [[I7]], align 4 -// CHECK9-NEXT: [[TMP20:%.*]] = load i64, ptr [[K8]], align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = load i64, ptr [[K8]], align 8, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i64 [[TMP20]], ptr [[K]], align 8 // CHECK9-NEXT: store i32 12, ptr [[LIN]], align 4 // CHECK9-NEXT: store i64 0, ptr [[DOTOMP_LB21]], align 8 // CHECK9-NEXT: store i64 3, ptr [[DOTOMP_UB22]], align 8 -// CHECK9-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_LB21]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_LB21]], align 8, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i64 [[TMP21]], ptr [[DOTOMP_IV23]], 
align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK9-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP22]], ptr [[DOTLINEAR_START24]], align 4 -// CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP23]], ptr [[DOTLINEAR_START25]], align 4 // CHECK9-NEXT: [[CALL26:%.*]] = call noundef i64 @_Z7get_valv() // CHECK9-NEXT: store i64 [[CALL26]], ptr [[DOTLINEAR_STEP]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] // CHECK9: omp.inner.for.cond29: -// CHECK9-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK9-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_UB22]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_UB22]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]] // CHECK9-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]] // CHECK9: omp.inner.for.body31: -// CHECK9-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL32:%.*]] = mul i64 [[TMP26]], 400 // CHECK9-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]] -// CHECK9-NEXT: store i64 [[SUB33]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTLINEAR_START24]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: store i64 [[SUB33]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTLINEAR_START24]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64 -// CHECK9-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK9-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]] // CHECK9-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]] // CHECK9-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 -// CHECK9-NEXT: store i32 [[CONV37]], ptr [[LIN27]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTLINEAR_START25]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: store i32 [[CONV37]], ptr [[LIN27]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTLINEAR_START25]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV38:%.*]] = sext i32 [[TMP30]] to i64 -// CHECK9-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK9-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group 
[[ACC_GRP9]] +// CHECK9-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]] // CHECK9-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]] // CHECK9-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32 -// CHECK9-NEXT: store i32 [[CONV41]], ptr [[A28]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK9-NEXT: [[TMP33:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: store i32 [[CONV41]], ptr [[A28]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: [[TMP33:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV42:%.*]] = sext i16 [[TMP33]] to i32 // CHECK9-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1 // CHECK9-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16 -// CHECK9-NEXT: store i16 [[CONV44]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: store i16 [[CONV44]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP10]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]] // CHECK9: omp.body.continue45: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]] // CHECK9: omp.inner.for.inc46: -// CHECK9-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD47:%.*]] = add i64 [[TMP34]], 1 -// CHECK9-NEXT: store i64 [[ADD47]], ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK9-NEXT: store i64 [[ADD47]], ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK9: omp.inner.for.end48: // CHECK9-NEXT: store i64 400, ptr [[IT]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = load i32, ptr [[LIN27]], align 4 +// CHECK9-NEXT: [[TMP35:%.*]] = load i32, ptr [[LIN27]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP35]], ptr [[LIN]], align 4 -// CHECK9-NEXT: [[TMP36:%.*]] = load i32, ptr [[A28]], align 4 +// CHECK9-NEXT: [[TMP36:%.*]] = load i32, ptr [[A28]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP36]], ptr [[A]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB50]], align 4 // CHECK9-NEXT: store i32 3, ptr [[DOTOMP_UB51]], align 4 -// CHECK9-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_LB50]], align 4 +// CHECK9-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_LB50]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP37]], ptr [[DOTOMP_IV52]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]] // CHECK9: omp.inner.for.cond54: -// CHECK9-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK9-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_UB51]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_UB51]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]] // CHECK9-NEXT: br 
i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]] // CHECK9: omp.inner.for.body56: -// CHECK9-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4 // CHECK9-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]] // CHECK9-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16 -// CHECK9-NEXT: store i16 [[CONV59]], ptr [[IT53]], align 2, !llvm.access.group [[ACC_GRP12]] -// CHECK9-NEXT: [[TMP41:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: store i16 [[CONV59]], ptr [[IT53]], align 2, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP41:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1 -// CHECK9-NEXT: store i32 [[ADD60]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK9-NEXT: [[TMP42:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: store i32 [[ADD60]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP42:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV61:%.*]] = sext i16 [[TMP42]] to i32 // CHECK9-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1 // CHECK9-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16 -// CHECK9-NEXT: store i16 [[CONV63]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: store i16 [[CONV63]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP13]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]] // CHECK9: omp.body.continue64: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]] // CHECK9: omp.inner.for.inc65: -// CHECK9-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1 -// CHECK9-NEXT: store i32 [[ADD66]], ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD66]], ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK9: omp.inner.for.end67: // CHECK9-NEXT: store i16 22, ptr [[IT53]], align 2 -// CHECK9-NEXT: [[TMP44:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP44:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP44]], ptr [[DOTCAPTURE_EXPR_]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB69]], align 4 // CHECK9-NEXT: store i32 25, ptr [[DOTOMP_UB70]], align 4 -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_LB69]], align 4 +// CHECK9-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_LB69]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP45]], ptr [[DOTOMP_IV71]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]] // CHECK9: omp.inner.for.cond73: -// CHECK9-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK9-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTOMP_UB70]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: 
[[TMP46:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTOMP_UB70]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]] // CHECK9-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]] // CHECK9: omp.inner.for.body75: -// CHECK9-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1 // CHECK9-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]] // CHECK9-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8 -// CHECK9-NEXT: store i8 [[CONV78]], ptr [[IT72]], align 1, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: [[TMP49:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store i8 [[CONV78]], ptr [[IT72]], align 1, !llvm.access.group [[ACC_GRP16]] +// CHECK9-NEXT: [[TMP49:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1 -// CHECK9-NEXT: store i32 [[ADD79]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store i32 [[ADD79]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[B]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP50:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP50:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV80:%.*]] = fpext float [[TMP50]] to double // CHECK9-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00 // CHECK9-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float -// CHECK9-NEXT: store float [[CONV82]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store float [[CONV82]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, ptr [[VLA]], i64 3 -// CHECK9-NEXT: [[TMP51:%.*]] = load float, ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP51:%.*]] = load float, ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV84:%.*]] = fpext float [[TMP51]] to double // CHECK9-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00 // CHECK9-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float -// CHECK9-NEXT: store float [[CONV86]], ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store float [[CONV86]], ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[C]], i64 0, i64 1 // CHECK9-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX87]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP52:%.*]] = load double, ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP52:%.*]] = load double, ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00 -// CHECK9-NEXT: store double [[ADD89]], ptr [[ARRAYIDX88]], align 
8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store double [[ADD89]], ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]] // CHECK9-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, ptr [[VLA1]], i64 [[TMP53]] // CHECK9-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX90]], i64 3 -// CHECK9-NEXT: [[TMP54:%.*]] = load double, ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP54:%.*]] = load double, ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00 -// CHECK9-NEXT: store double [[ADD92]], ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store double [[ADD92]], ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP55:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP55:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1 -// CHECK9-NEXT: store i64 [[ADD93]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store i64 [[ADD93]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP56:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP56:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV94:%.*]] = sext i8 [[TMP56]] to i32 // CHECK9-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 // CHECK9-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8 -// CHECK9-NEXT: store i8 [[CONV96]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store i8 [[CONV96]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] // CHECK9: omp.body.continue97: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] // CHECK9: omp.inner.for.inc98: -// CHECK9-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1 -// CHECK9-NEXT: store i32 [[ADD99]], ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD99]], ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK9: omp.inner.for.end100: // CHECK9-NEXT: store i8 96, ptr [[IT72]], align 1 -// CHECK9-NEXT: [[TMP58:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP58:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[TMP59:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP59]]) // CHECK9-NEXT: ret i32 [[TMP58]] @@ -7094,27 +7094,27 @@ // CHECK9-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK9-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK9-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load 
i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK9-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK9-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK9-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK9-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: ret i32 [[TMP8]] // // @@ -7134,10 +7134,10 @@ // CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK9-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK9-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK9-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK9-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 @@ -7146,49 +7146,49 @@ // CHECK9-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 // CHECK9-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK9-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK9-NEXT: br label 
[[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP:%.*]] = icmp ule i64 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL:%.*]] = mul i64 [[TMP8]], 400 // CHECK9-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK9-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP18]] -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP9]] to double // CHECK9-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: store double [[ADD2]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: store double [[ADD2]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP19]] // CHECK9-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP10:%.*]] = load double, ptr [[A3]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP10:%.*]] = load double, ptr [[A3]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00 -// CHECK9-NEXT: store double [[INC]], ptr [[A3]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: store double [[INC]], ptr [[A3]], align 8, !llvm.access.group [[ACC_GRP19]] // CHECK9-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 // CHECK9-NEXT: [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP11]] // CHECK9-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK9-NEXT: store i16 [[CONV4]], ptr [[ARRAYIDX5]], align 2, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: store i16 [[CONV4]], ptr [[ARRAYIDX5]], align 2, !llvm.access.group [[ACC_GRP19]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD6:%.*]] = add i64 [[TMP12]], 1 -// CHECK9-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// 
CHECK9-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: store i64 400, ptr [[IT]], align 8 // CHECK9-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK9-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP13]] // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX7]], i64 1 -// CHECK9-NEXT: [[TMP14:%.*]] = load i16, ptr [[ARRAYIDX8]], align 2 +// CHECK9-NEXT: [[TMP14:%.*]] = load i16, ptr [[ARRAYIDX8]], align 2, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV9:%.*]] = sext i16 [[TMP14]] to i32 -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[B]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP15]] // CHECK9-NEXT: [[TMP16:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP16]]) @@ -7212,7 +7212,7 @@ // CHECK9-NEXT: store i8 0, ptr [[AAA]], align 1 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 429496720, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: ret i32 [[TMP0]] // // @@ -7233,42 +7233,42 @@ // CHECK9-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK9-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK9-NEXT: store i64 6, ptr [[DOTOMP_UB]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i64 [[TMP0]], ptr [[DOTOMP_IV]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP21:![0-9]+]] -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] -// CHECK9-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP21]] -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP22]] +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK9-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK9-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group 
[[ACC_GRP21]] +// CHECK9-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK9-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP22]] // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[B]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK9-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP22]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 -// CHECK9-NEXT: store i64 [[ADD5]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP21]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK9-NEXT: store i64 [[ADD5]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP22]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: store i64 11, ptr [[I]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: ret i32 [[TMP8]] // // @@ -7328,236 +7328,236 @@ // CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK11-NEXT: store i32 0, ptr [[A]], align 4 // CHECK11-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]] // CHECK11-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8 // CHECK11-NEXT: store i32 [[TMP2]], ptr [[__VLA_EXPR1]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 5, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label 
[[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]] -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK11-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: store i32 33, ptr [[I]], align 4 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK11-NEXT: store i64 [[CALL]], ptr [[K]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4 // CHECK11-NEXT: store i32 8, ptr [[DOTOMP_UB5]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV6]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = load i64, ptr [[K]], align 8 +// CHECK11-NEXT: [[TMP10:%.*]] = load i64, ptr [[K]], align 8, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i64 [[TMP10]], ptr [[DOTLINEAR_START]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK11: omp.inner.for.cond9: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]] -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] // CHECK11-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label 
[[OMP_INNER_FOR_END19:%.*]] // CHECK11: omp.inner.for.body11: -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]] -// CHECK11-NEXT: store i32 [[SUB]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP7]] -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: store i32 [[SUB]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64 // CHECK11-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]] -// CHECK11-NEXT: store i64 [[ADD14]], ptr [[K8]], align 8, !llvm.access.group [[ACC_GRP7]] -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: store i64 [[ADD14]], ptr [[K8]], align 8, !llvm.access.group [[ACC_GRP8]] +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK11-NEXT: store i32 [[ADD15]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: store i32 [[ADD15]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] // CHECK11: omp.body.continue16: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK11: omp.inner.for.inc17: -// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1 -// CHECK11-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK11: omp.inner.for.end19: // CHECK11-NEXT: store i32 1, ptr [[I7]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = load i64, ptr [[K8]], align 8 +// CHECK11-NEXT: [[TMP18:%.*]] = load i64, ptr [[K8]], align 8, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i64 [[TMP18]], ptr [[K]], align 8 // CHECK11-NEXT: store i32 12, ptr [[LIN]], align 4 // CHECK11-NEXT: store i64 0, ptr [[DOTOMP_LB21]], align 8 // CHECK11-NEXT: store i64 3, ptr [[DOTOMP_UB22]], align 8 -// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_LB21]], align 8 +// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_LB21]], align 8, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i64 [[TMP19]], ptr [[DOTOMP_IV23]], align 8 -// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN]], align 4, !noundef 
[[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP20]], ptr [[DOTLINEAR_START24]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP21]], ptr [[DOTLINEAR_START25]], align 4 // CHECK11-NEXT: [[CALL26:%.*]] = call noundef i64 @_Z7get_valv() // CHECK11-NEXT: store i64 [[CALL26]], ptr [[DOTLINEAR_STEP]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] // CHECK11: omp.inner.for.cond29: -// CHECK11-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10:![0-9]+]] -// CHECK11-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_UB22]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_UB22]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] // CHECK11-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]] // CHECK11: omp.inner.for.body31: -// CHECK11-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL32:%.*]] = mul i64 [[TMP24]], 400 // CHECK11-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]] -// CHECK11-NEXT: store i64 [[SUB33]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP10]] -// CHECK11-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTLINEAR_START24]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: store i64 [[SUB33]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTLINEAR_START24]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV34:%.*]] = sext i32 [[TMP25]] to i64 -// CHECK11-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] -// CHECK11-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]] // CHECK11-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]] // CHECK11-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 -// CHECK11-NEXT: store i32 [[CONV37]], ptr [[LIN27]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTLINEAR_START25]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: store i32 [[CONV37]], ptr [[LIN27]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTLINEAR_START25]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV38:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK11-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] -// CHECK11-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group 
[[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]] // CHECK11-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]] // CHECK11-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32 -// CHECK11-NEXT: store i32 [[CONV41]], ptr [[A28]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK11-NEXT: [[TMP31:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: store i32 [[CONV41]], ptr [[A28]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: [[TMP31:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV42:%.*]] = sext i16 [[TMP31]] to i32 // CHECK11-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1 // CHECK11-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16 -// CHECK11-NEXT: store i16 [[CONV44]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: store i16 [[CONV44]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP11]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]] // CHECK11: omp.body.continue45: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]] // CHECK11: omp.inner.for.inc46: -// CHECK11-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD47:%.*]] = add i64 [[TMP32]], 1 -// CHECK11-NEXT: store i64 [[ADD47]], ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK11-NEXT: store i64 [[ADD47]], ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK11: omp.inner.for.end48: // CHECK11-NEXT: store i64 400, ptr [[IT]], align 8 -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[LIN27]], align 4 +// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[LIN27]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP33]], ptr [[LIN]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = load i32, ptr [[A28]], align 4 +// CHECK11-NEXT: [[TMP34:%.*]] = load i32, ptr [[A28]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP34]], ptr [[A]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB50]], align 4 // CHECK11-NEXT: store i32 3, ptr [[DOTOMP_UB51]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_LB50]], align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_LB50]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP35]], ptr [[DOTOMP_IV52]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]] // CHECK11: omp.inner.for.cond54: -// CHECK11-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]] -// CHECK11-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_UB51]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_UB51]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]] // CHECK11-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label 
[[OMP_INNER_FOR_END67:%.*]] // CHECK11: omp.inner.for.body56: -// CHECK11-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4 // CHECK11-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]] // CHECK11-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16 -// CHECK11-NEXT: store i16 [[CONV59]], ptr [[IT53]], align 2, !llvm.access.group [[ACC_GRP13]] -// CHECK11-NEXT: [[TMP39:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: store i16 [[CONV59]], ptr [[IT53]], align 2, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP39:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1 -// CHECK11-NEXT: store i32 [[ADD60]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK11-NEXT: [[TMP40:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: store i32 [[ADD60]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP40:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV61:%.*]] = sext i16 [[TMP40]] to i32 // CHECK11-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1 // CHECK11-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16 -// CHECK11-NEXT: store i16 [[CONV63]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: store i16 [[CONV63]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP14]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]] // CHECK11: omp.body.continue64: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]] // CHECK11: omp.inner.for.inc65: -// CHECK11-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1 -// CHECK11-NEXT: store i32 [[ADD66]], ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD66]], ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK11: omp.inner.for.end67: // CHECK11-NEXT: store i16 22, ptr [[IT53]], align 2 -// CHECK11-NEXT: [[TMP42:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP42:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP42]], ptr [[DOTCAPTURE_EXPR_]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB69]], align 4 // CHECK11-NEXT: store i32 25, ptr [[DOTOMP_UB70]], align 4 -// CHECK11-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_LB69]], align 4 +// CHECK11-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_LB69]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP43]], ptr [[DOTOMP_IV71]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]] // CHECK11: omp.inner.for.cond73: -// CHECK11-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]] -// CHECK11-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_UB70]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: 
[[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_UB70]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]] // CHECK11-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]] // CHECK11: omp.inner.for.body75: -// CHECK11-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1 // CHECK11-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]] // CHECK11-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8 -// CHECK11-NEXT: store i8 [[CONV78]], ptr [[IT72]], align 1, !llvm.access.group [[ACC_GRP16]] -// CHECK11-NEXT: [[TMP47:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store i8 [[CONV78]], ptr [[IT72]], align 1, !llvm.access.group [[ACC_GRP17]] +// CHECK11-NEXT: [[TMP47:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1 -// CHECK11-NEXT: store i32 [[ADD79]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store i32 [[ADD79]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[B]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP48:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP48:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV80:%.*]] = fpext float [[TMP48]] to double // CHECK11-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00 // CHECK11-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float -// CHECK11-NEXT: store float [[CONV82]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store float [[CONV82]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, ptr [[VLA]], i32 3 -// CHECK11-NEXT: [[TMP49:%.*]] = load float, ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP49:%.*]] = load float, ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV84:%.*]] = fpext float [[TMP49]] to double // CHECK11-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00 // CHECK11-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float -// CHECK11-NEXT: store float [[CONV86]], ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store float [[CONV86]], ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[C]], i32 0, i32 1 // CHECK11-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX87]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP50:%.*]] = load double, ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP50:%.*]] = load double, ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00 -// CHECK11-NEXT: store double 
[[ADD89]], ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store double [[ADD89]], ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK11-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, ptr [[VLA1]], i32 [[TMP51]] // CHECK11-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX90]], i32 3 -// CHECK11-NEXT: [[TMP52:%.*]] = load double, ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP52:%.*]] = load double, ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00 -// CHECK11-NEXT: store double [[ADD92]], ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store double [[ADD92]], ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP53:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP53:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1 -// CHECK11-NEXT: store i64 [[ADD93]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store i64 [[ADD93]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP54:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP54:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV94:%.*]] = sext i8 [[TMP54]] to i32 // CHECK11-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 // CHECK11-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8 -// CHECK11-NEXT: store i8 [[CONV96]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store i8 [[CONV96]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] // CHECK11: omp.body.continue97: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] // CHECK11: omp.inner.for.inc98: -// CHECK11-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1 -// CHECK11-NEXT: store i32 [[ADD99]], ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD99]], ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP18:![0-9]+]] // CHECK11: omp.inner.for.end100: // CHECK11-NEXT: store i8 96, ptr [[IT72]], align 1 -// CHECK11-NEXT: [[TMP56:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP56:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[TMP57:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP57]]) // CHECK11-NEXT: ret i32 [[TMP56]] @@ -7571,27 +7571,27 @@ // CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // 
CHECK11-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK11-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK11-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK11-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK11-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: ret i32 [[TMP8]] // // @@ -7611,10 +7611,10 @@ // CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK11-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] @@ -7622,49 +7622,49 @@ // CHECK11-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 // CHECK11-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK11-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK11-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i64 [[TMP4]], ptr 
[[DOTOMP_IV]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK11-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP20:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP:%.*]] = icmp ule i64 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL:%.*]] = mul i64 [[TMP7]], 400 // CHECK11-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK11-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP19]] -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP20]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP8]] to double // CHECK11-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: store double [[ADD2]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: store double [[ADD2]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK11-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP9:%.*]] = load double, ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP9:%.*]] = load double, ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00 -// CHECK11-NEXT: store double [[INC]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: store double [[INC]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK11-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 // CHECK11-NEXT: [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP10]] // CHECK11-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK11-NEXT: store i16 [[CONV4]], ptr [[ARRAYIDX5]], align 2, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: store i16 [[CONV4]], ptr [[ARRAYIDX5]], align 2, !llvm.access.group [[ACC_GRP20]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD6:%.*]] = add i64 [[TMP11]], 1 -// CHECK11-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]] 
-// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] +// CHECK11-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP20]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: store i64 400, ptr [[IT]], align 8 // CHECK11-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK11-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP12]] // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX7]], i32 1 -// CHECK11-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX8]], align 2 +// CHECK11-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX8]], align 2, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV9:%.*]] = sext i16 [[TMP13]] to i32 -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[B]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP14]] // CHECK11-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) @@ -7688,7 +7688,7 @@ // CHECK11-NEXT: store i8 0, ptr [[AAA]], align 1 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 429496720, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: ret i32 [[TMP0]] // // @@ -7709,42 +7709,42 @@ // CHECK11-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK11-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK11-NEXT: store i64 6, ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i64 [[TMP0]], ptr [[DOTOMP_IV]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP22:![0-9]+]] -// CHECK11-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP23:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] -// CHECK11-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP22]] -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK11-NEXT: store i32 [[ADD1]], ptr [[A]], align 
4, !llvm.access.group [[ACC_GRP22]] -// CHECK11-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK11-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP23]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[B]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK11-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 -// CHECK11-NEXT: store i64 [[ADD5]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP22]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] +// CHECK11-NEXT: store i64 [[ADD5]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: store i64 11, ptr [[I]], align 8 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: ret i32 [[TMP8]] // // @@ -7804,238 +7804,238 @@ // CHECK13-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK13-NEXT: store i32 0, ptr [[A]], align 4 // CHECK13-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64 // CHECK13-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]] // CHECK13-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8 // CHECK13-NEXT: store i64 [[TMP4]], ptr [[__VLA_EXPR1]], align 8 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 5, ptr [[DOTOMP_UB]], align 4 -// 
CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK13-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 33, ptr [[I]], align 4 // CHECK13-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK13-NEXT: store i64 [[CALL]], ptr [[K]], align 8 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4 // CHECK13-NEXT: store i32 8, ptr [[DOTOMP_UB5]], align 4 -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4 +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV6]], align 4 -// CHECK13-NEXT: [[TMP12:%.*]] = load i64, ptr [[K]], align 8 +// CHECK13-NEXT: [[TMP12:%.*]] = load i64, ptr [[K]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i64 [[TMP12]], ptr [[DOTLINEAR_START]], align 8 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK13: omp.inner.for.cond9: -// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]] -// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP14:%.*]] = load 
i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK13-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]] // CHECK13: omp.inner.for.body11: -// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]] -// CHECK13-NEXT: store i32 [[SUB]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK13-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP6]] -// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK13-NEXT: store i32 [[SUB]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3 // CHECK13-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64 // CHECK13-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]] -// CHECK13-NEXT: store i64 [[ADD14]], ptr [[K8]], align 8, !llvm.access.group [[ACC_GRP6]] -// CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK13-NEXT: store i64 [[ADD14]], ptr [[K8]], align 8, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK13-NEXT: store i32 [[ADD15]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK13-NEXT: store i32 [[ADD15]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] // CHECK13: omp.body.continue16: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK13: omp.inner.for.inc17: -// CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK13-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK13: omp.inner.for.end19: // CHECK13-NEXT: store i32 1, ptr [[I7]], align 4 -// CHECK13-NEXT: [[TMP20:%.*]] = load i64, ptr [[K8]], align 8 +// CHECK13-NEXT: [[TMP20:%.*]] = load i64, ptr [[K8]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i64 [[TMP20]], ptr [[K]], align 8 // CHECK13-NEXT: store i32 12, ptr [[LIN]], align 4 // CHECK13-NEXT: store i64 0, ptr [[DOTOMP_LB21]], align 8 // CHECK13-NEXT: store i64 3, ptr [[DOTOMP_UB22]], align 8 -// CHECK13-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_LB21]], align 8 +// CHECK13-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_LB21]], align 8, 
!noundef [[NOUNDEF2]] // CHECK13-NEXT: store i64 [[TMP21]], ptr [[DOTOMP_IV23]], align 8 -// CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP22]], ptr [[DOTLINEAR_START24]], align 4 -// CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP23]], ptr [[DOTLINEAR_START25]], align 4 // CHECK13-NEXT: [[CALL26:%.*]] = call noundef i64 @_Z7get_valv() // CHECK13-NEXT: store i64 [[CALL26]], ptr [[DOTLINEAR_STEP]], align 8 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] // CHECK13: omp.inner.for.cond29: -// CHECK13-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK13-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_UB22]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK13-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_UB22]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]] // CHECK13-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]] // CHECK13: omp.inner.for.body31: -// CHECK13-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK13-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL32:%.*]] = mul i64 [[TMP26]], 400 // CHECK13-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]] -// CHECK13-NEXT: store i64 [[SUB33]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTLINEAR_START24]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK13-NEXT: store i64 [[SUB33]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTLINEAR_START24]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64 -// CHECK13-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK13-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK13-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]] // CHECK13-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]] // CHECK13-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 -// CHECK13-NEXT: store i32 [[CONV37]], ptr [[LIN27]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK13-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTLINEAR_START25]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK13-NEXT: store i32 [[CONV37]], ptr [[LIN27]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTLINEAR_START25]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV38:%.*]] = sext i32 [[TMP30]] to i64 -// CHECK13-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, 
!llvm.access.group [[ACC_GRP9]] -// CHECK13-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK13-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]] // CHECK13-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]] // CHECK13-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32 -// CHECK13-NEXT: store i32 [[CONV41]], ptr [[A28]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK13-NEXT: [[TMP33:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP9]] +// CHECK13-NEXT: store i32 [[CONV41]], ptr [[A28]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: [[TMP33:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV42:%.*]] = sext i16 [[TMP33]] to i32 // CHECK13-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1 // CHECK13-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16 -// CHECK13-NEXT: store i16 [[CONV44]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP9]] +// CHECK13-NEXT: store i16 [[CONV44]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP10]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]] // CHECK13: omp.body.continue45: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]] // CHECK13: omp.inner.for.inc46: -// CHECK13-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK13-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD47:%.*]] = add i64 [[TMP34]], 1 -// CHECK13-NEXT: store i64 [[ADD47]], ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK13-NEXT: store i64 [[ADD47]], ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK13: omp.inner.for.end48: // CHECK13-NEXT: store i64 400, ptr [[IT]], align 8 -// CHECK13-NEXT: [[TMP35:%.*]] = load i32, ptr [[LIN27]], align 4 +// CHECK13-NEXT: [[TMP35:%.*]] = load i32, ptr [[LIN27]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP35]], ptr [[LIN]], align 4 -// CHECK13-NEXT: [[TMP36:%.*]] = load i32, ptr [[A28]], align 4 +// CHECK13-NEXT: [[TMP36:%.*]] = load i32, ptr [[A28]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP36]], ptr [[A]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB50]], align 4 // CHECK13-NEXT: store i32 3, ptr [[DOTOMP_UB51]], align 4 -// CHECK13-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_LB50]], align 4 +// CHECK13-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_LB50]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP37]], ptr [[DOTOMP_IV52]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]] // CHECK13: omp.inner.for.cond54: -// CHECK13-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK13-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_UB51]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK13-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP39:%.*]] = load i32, 
ptr [[DOTOMP_UB51]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]] // CHECK13-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]] // CHECK13: omp.inner.for.body56: -// CHECK13-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK13-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4 // CHECK13-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]] // CHECK13-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16 -// CHECK13-NEXT: store i16 [[CONV59]], ptr [[IT53]], align 2, !llvm.access.group [[ACC_GRP12]] -// CHECK13-NEXT: [[TMP41:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK13-NEXT: store i16 [[CONV59]], ptr [[IT53]], align 2, !llvm.access.group [[ACC_GRP13]] +// CHECK13-NEXT: [[TMP41:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1 -// CHECK13-NEXT: store i32 [[ADD60]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK13-NEXT: [[TMP42:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP12]] +// CHECK13-NEXT: store i32 [[ADD60]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK13-NEXT: [[TMP42:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV61:%.*]] = sext i16 [[TMP42]] to i32 // CHECK13-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1 // CHECK13-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16 -// CHECK13-NEXT: store i16 [[CONV63]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP12]] +// CHECK13-NEXT: store i16 [[CONV63]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP13]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]] // CHECK13: omp.body.continue64: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]] // CHECK13: omp.inner.for.inc65: -// CHECK13-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK13-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1 -// CHECK13-NEXT: store i32 [[ADD66]], ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD66]], ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK13: omp.inner.for.end67: // CHECK13-NEXT: store i16 22, ptr [[IT53]], align 2 -// CHECK13-NEXT: [[TMP44:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP44:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP44]], ptr [[DOTCAPTURE_EXPR_]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB69]], align 4 // CHECK13-NEXT: store i32 25, ptr [[DOTOMP_UB70]], align 4 -// CHECK13-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_LB69]], align 4 +// CHECK13-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_LB69]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP45]], ptr [[DOTOMP_IV71]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]] // CHECK13: omp.inner.for.cond73: -// CHECK13-NEXT: 
[[TMP46:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK13-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTOMP_UB70]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP47:%.*]] = load i32, ptr [[DOTOMP_UB70]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]] // CHECK13-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]] // CHECK13: omp.inner.for.body75: -// CHECK13-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: [[TMP48:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1 // CHECK13-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]] // CHECK13-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8 -// CHECK13-NEXT: store i8 [[CONV78]], ptr [[IT72]], align 1, !llvm.access.group [[ACC_GRP15]] -// CHECK13-NEXT: [[TMP49:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: store i8 [[CONV78]], ptr [[IT72]], align 1, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: [[TMP49:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1 -// CHECK13-NEXT: store i32 [[ADD79]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: store i32 [[ADD79]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[B]], i64 0, i64 2 -// CHECK13-NEXT: [[TMP50:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: [[TMP50:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV80:%.*]] = fpext float [[TMP50]] to double // CHECK13-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00 // CHECK13-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float -// CHECK13-NEXT: store float [[CONV82]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: store float [[CONV82]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK13-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, ptr [[VLA]], i64 3 -// CHECK13-NEXT: [[TMP51:%.*]] = load float, ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: [[TMP51:%.*]] = load float, ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV84:%.*]] = fpext float [[TMP51]] to double // CHECK13-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00 // CHECK13-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float -// CHECK13-NEXT: store float [[CONV86]], ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: store float [[CONV86]], ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK13-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[C]], i64 0, i64 1 // CHECK13-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX87]], i64 0, i64 2 -// CHECK13-NEXT: [[TMP52:%.*]] = load double, ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP15]] +// 
CHECK13-NEXT: [[TMP52:%.*]] = load double, ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00 -// CHECK13-NEXT: store double [[ADD89]], ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: store double [[ADD89]], ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP16]] // CHECK13-NEXT: [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]] // CHECK13-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, ptr [[VLA1]], i64 [[TMP53]] // CHECK13-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX90]], i64 3 -// CHECK13-NEXT: [[TMP54:%.*]] = load double, ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: [[TMP54:%.*]] = load double, ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00 -// CHECK13-NEXT: store double [[ADD92]], ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: store double [[ADD92]], ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP16]] // CHECK13-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP55:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: [[TMP55:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1 -// CHECK13-NEXT: store i64 [[ADD93]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: store i64 [[ADD93]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP16]] // CHECK13-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP56:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: [[TMP56:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV94:%.*]] = sext i8 [[TMP56]] to i32 // CHECK13-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 // CHECK13-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8 -// CHECK13-NEXT: store i8 [[CONV96]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: store i8 [[CONV96]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP16]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] // CHECK13: omp.body.continue97: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] // CHECK13: omp.inner.for.inc98: -// CHECK13-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: [[TMP57:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1 -// CHECK13-NEXT: store i32 [[ADD99]], ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD99]], ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK13: omp.inner.for.end100: // CHECK13-NEXT: store i8 96, ptr [[IT72]], align 1 -// CHECK13-NEXT: [[TMP58:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP58:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TMP59:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: 
call void @llvm.stackrestore(ptr [[TMP59]]) // CHECK13-NEXT: ret i32 [[TMP58]] @@ -8049,27 +8049,27 @@ // CHECK13-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK13-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK13-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK13-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK13-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK13-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK13-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: ret i32 [[TMP8]] // // @@ -8090,82 +8090,82 @@ // CHECK13-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK13-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK13-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK13-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK13-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK13-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 
2 // CHECK13-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60 // CHECK13-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 // CHECK13-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK13-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK13-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 -// CHECK13-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK13-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 -// CHECK13-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK13-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 // CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK13: omp_if.then: // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK13-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK13-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP2:%.*]] = icmp ule i64 [[TMP8]], [[TMP9]] // CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK13-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 400 // CHECK13-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK13-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP18]] -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK13-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP11]] to double // CHECK13-NEXT: [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK13-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK13-NEXT: store double [[ADD3]], ptr [[A]], align 8, !nontemporal !19, !llvm.access.group [[ACC_GRP18]] +// CHECK13-NEXT: store double [[ADD3]], ptr [[A]], align 8, !nontemporal !20, !llvm.access.group [[ACC_GRP19]] // CHECK13-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP12:%.*]] = load double, ptr [[A4]], align 8, !nontemporal !19, !llvm.access.group [[ACC_GRP18]] +// CHECK13-NEXT: [[TMP12:%.*]] = load double, ptr [[A4]], align 8, !nontemporal !20, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00 -// CHECK13-NEXT: store double [[INC]], ptr [[A4]], align 8, !nontemporal !19, 
!llvm.access.group [[ACC_GRP18]] +// CHECK13-NEXT: store double [[INC]], ptr [[A4]], align 8, !nontemporal !20, !llvm.access.group [[ACC_GRP19]] // CHECK13-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK13-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP13]] // CHECK13-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK13-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP18]] +// CHECK13-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP19]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK13-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD7:%.*]] = add i64 [[TMP14]], 1 -// CHECK13-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] +// CHECK13-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: br label [[OMP_IF_END:%.*]] // CHECK13: omp_if.else: // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] // CHECK13: omp.inner.for.cond8: -// CHECK13-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK13-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK13-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP9:%.*]] = icmp ule i64 [[TMP15]], [[TMP16]] // CHECK13-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]] // CHECK13: omp.inner.for.body10: -// CHECK13-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK13-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL11:%.*]] = mul i64 [[TMP17]], 400 // CHECK13-NEXT: [[SUB12:%.*]] = sub i64 2000, [[MUL11]] // CHECK13-NEXT: store i64 [[SUB12]], ptr [[IT]], align 8 -// CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[B]], align 4 +// CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV13:%.*]] = sitofp i32 [[TMP18]] to double // CHECK13-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00 // CHECK13-NEXT: [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 // CHECK13-NEXT: store double [[ADD14]], ptr [[A15]], align 8 // CHECK13-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP19:%.*]] = load double, ptr [[A16]], align 8 +// CHECK13-NEXT: [[TMP19:%.*]] = load double, ptr [[A16]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[INC17:%.*]] = fadd double [[TMP19]], 1.000000e+00 // CHECK13-NEXT: store double [[INC17]], ptr [[A16]], align 8 // CHECK13-NEXT: [[CONV18:%.*]] = fptosi double [[INC17]] to i16 @@ -8177,10 +8177,10 @@ // CHECK13: omp.body.continue21: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC22:%.*]] // CHECK13: omp.inner.for.inc22: 
-// CHECK13-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK13-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD23:%.*]] = add i64 [[TMP21]], 1 // CHECK13-NEXT: store i64 [[ADD23]], ptr [[DOTOMP_IV]], align 8 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK13: omp.inner.for.end24: // CHECK13-NEXT: br label [[OMP_IF_END]] // CHECK13: omp_if.end: @@ -8188,9 +8188,9 @@ // CHECK13-NEXT: [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK13-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP22]] // CHECK13-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX25]], i64 1 -// CHECK13-NEXT: [[TMP23:%.*]] = load i16, ptr [[ARRAYIDX26]], align 2 +// CHECK13-NEXT: [[TMP23:%.*]] = load i16, ptr [[ARRAYIDX26]], align 2, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV27:%.*]] = sext i16 [[TMP23]] to i32 -// CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[B]], align 4 +// CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP24]] // CHECK13-NEXT: [[TMP25:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP25]]) @@ -8214,7 +8214,7 @@ // CHECK13-NEXT: store i8 0, ptr [[AAA]], align 1 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 429496720, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: ret i32 [[TMP0]] // // @@ -8235,42 +8235,42 @@ // CHECK13-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK13-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK13-NEXT: store i64 6, ptr [[DOTOMP_UB]], align 8 -// CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK13-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i64 [[TMP0]], ptr [[DOTOMP_IV]], align 8 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP24:![0-9]+]] -// CHECK13-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] -// CHECK13-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP24]] -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group 
[[ACC_GRP25]] +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK13-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK13-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK13-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 // CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK13-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK13-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP25]] // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[B]], i64 0, i64 2 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK13-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 -// CHECK13-NEXT: store i64 [[ADD5]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP24]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK13-NEXT: store i64 [[ADD5]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP25]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i64 11, ptr [[I]], align 8 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: ret i32 [[TMP8]] // // @@ -8330,236 +8330,236 @@ // CHECK15-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK15-NEXT: store i32 0, ptr [[A]], align 4 // CHECK15-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() // CHECK15-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]] // CHECK15-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8 // CHECK15-NEXT: store i32 [[TMP2]], ptr [[__VLA_EXPR1]], align 
4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 5, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]] -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK15-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 33, ptr [[I]], align 4 // CHECK15-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK15-NEXT: store i64 [[CALL]], ptr [[K]], align 8 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB4]], align 4 // CHECK15-NEXT: store i32 8, ptr [[DOTOMP_UB5]], align 4 -// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4 +// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB4]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV6]], align 4 -// CHECK15-NEXT: [[TMP10:%.*]] = load i64, ptr [[K]], align 8 +// CHECK15-NEXT: [[TMP10:%.*]] = load i64, ptr [[K]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i64 [[TMP10]], ptr [[DOTLINEAR_START]], align 8 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK15: omp.inner.for.cond9: -// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]] -// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV6]], 
align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] // CHECK15-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]] // CHECK15: omp.inner.for.body11: -// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1 // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]] -// CHECK15-NEXT: store i32 [[SUB]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK15-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP7]] -// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK15-NEXT: store i32 [[SUB]], ptr [[I7]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3 // CHECK15-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64 // CHECK15-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]] -// CHECK15-NEXT: store i64 [[ADD14]], ptr [[K8]], align 8, !llvm.access.group [[ACC_GRP7]] -// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK15-NEXT: store i64 [[ADD14]], ptr [[K8]], align 8, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK15-NEXT: store i32 [[ADD15]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK15-NEXT: store i32 [[ADD15]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] // CHECK15: omp.body.continue16: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK15: omp.inner.for.inc17: -// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1 -// CHECK15-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK15: omp.inner.for.end19: // CHECK15-NEXT: store i32 1, ptr [[I7]], align 4 -// CHECK15-NEXT: [[TMP18:%.*]] = load i64, ptr [[K8]], align 8 +// CHECK15-NEXT: [[TMP18:%.*]] = load i64, ptr [[K8]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i64 [[TMP18]], ptr [[K]], align 8 // CHECK15-NEXT: store i32 12, ptr [[LIN]], align 4 // CHECK15-NEXT: store i64 0, ptr [[DOTOMP_LB21]], align 8 // CHECK15-NEXT: store i64 3, ptr [[DOTOMP_UB22]], align 8 -// CHECK15-NEXT: [[TMP19:%.*]] = 
load i64, ptr [[DOTOMP_LB21]], align 8 +// CHECK15-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_LB21]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i64 [[TMP19]], ptr [[DOTOMP_IV23]], align 8 -// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN]], align 4 +// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[LIN]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP20]], ptr [[DOTLINEAR_START24]], align 4 -// CHECK15-NEXT: [[TMP21:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP21:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP21]], ptr [[DOTLINEAR_START25]], align 4 // CHECK15-NEXT: [[CALL26:%.*]] = call noundef i64 @_Z7get_valv() // CHECK15-NEXT: store i64 [[CALL26]], ptr [[DOTLINEAR_STEP]], align 8 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] // CHECK15: omp.inner.for.cond29: -// CHECK15-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10:![0-9]+]] -// CHECK15-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_UB22]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK15-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_UB22]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] // CHECK15-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]] // CHECK15: omp.inner.for.body31: -// CHECK15-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK15-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL32:%.*]] = mul i64 [[TMP24]], 400 // CHECK15-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]] -// CHECK15-NEXT: store i64 [[SUB33]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP10]] -// CHECK15-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTLINEAR_START24]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK15-NEXT: store i64 [[SUB33]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTLINEAR_START24]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV34:%.*]] = sext i32 [[TMP25]] to i64 -// CHECK15-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] -// CHECK15-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK15-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]] // CHECK15-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]] // CHECK15-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 -// CHECK15-NEXT: store i32 [[CONV37]], ptr [[LIN27]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK15-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTLINEAR_START25]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK15-NEXT: store i32 [[CONV37]], ptr [[LIN27]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTLINEAR_START25]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: 
[[CONV38:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK15-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] -// CHECK15-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK15-NEXT: [[TMP29:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]] // CHECK15-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]] // CHECK15-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32 -// CHECK15-NEXT: store i32 [[CONV41]], ptr [[A28]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK15-NEXT: [[TMP31:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP10]] +// CHECK15-NEXT: store i32 [[CONV41]], ptr [[A28]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: [[TMP31:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV42:%.*]] = sext i16 [[TMP31]] to i32 // CHECK15-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1 // CHECK15-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16 -// CHECK15-NEXT: store i16 [[CONV44]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP10]] +// CHECK15-NEXT: store i16 [[CONV44]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP11]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]] // CHECK15: omp.body.continue45: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]] // CHECK15: omp.inner.for.inc46: -// CHECK15-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK15-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD47:%.*]] = add i64 [[TMP32]], 1 -// CHECK15-NEXT: store i64 [[ADD47]], ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK15-NEXT: store i64 [[ADD47]], ptr [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK15: omp.inner.for.end48: // CHECK15-NEXT: store i64 400, ptr [[IT]], align 8 -// CHECK15-NEXT: [[TMP33:%.*]] = load i32, ptr [[LIN27]], align 4 +// CHECK15-NEXT: [[TMP33:%.*]] = load i32, ptr [[LIN27]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP33]], ptr [[LIN]], align 4 -// CHECK15-NEXT: [[TMP34:%.*]] = load i32, ptr [[A28]], align 4 +// CHECK15-NEXT: [[TMP34:%.*]] = load i32, ptr [[A28]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP34]], ptr [[A]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB50]], align 4 // CHECK15-NEXT: store i32 3, ptr [[DOTOMP_UB51]], align 4 -// CHECK15-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_LB50]], align 4 +// CHECK15-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_LB50]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP35]], ptr [[DOTOMP_IV52]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]] // CHECK15: omp.inner.for.cond54: -// CHECK15-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]] -// CHECK15-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_UB51]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK15-NEXT: [[TMP36:%.*]] = load i32, ptr 
[[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_UB51]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]] // CHECK15-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]] // CHECK15: omp.inner.for.body56: -// CHECK15-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK15-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4 // CHECK15-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]] // CHECK15-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16 -// CHECK15-NEXT: store i16 [[CONV59]], ptr [[IT53]], align 2, !llvm.access.group [[ACC_GRP13]] -// CHECK15-NEXT: [[TMP39:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK15-NEXT: store i16 [[CONV59]], ptr [[IT53]], align 2, !llvm.access.group [[ACC_GRP14]] +// CHECK15-NEXT: [[TMP39:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1 -// CHECK15-NEXT: store i32 [[ADD60]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK15-NEXT: [[TMP40:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP13]] +// CHECK15-NEXT: store i32 [[ADD60]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK15-NEXT: [[TMP40:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV61:%.*]] = sext i16 [[TMP40]] to i32 // CHECK15-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1 // CHECK15-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16 -// CHECK15-NEXT: store i16 [[CONV63]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP13]] +// CHECK15-NEXT: store i16 [[CONV63]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP14]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]] // CHECK15: omp.body.continue64: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]] // CHECK15: omp.inner.for.inc65: -// CHECK15-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK15-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1 -// CHECK15-NEXT: store i32 [[ADD66]], ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD66]], ptr [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK15: omp.inner.for.end67: // CHECK15-NEXT: store i16 22, ptr [[IT53]], align 2 -// CHECK15-NEXT: [[TMP42:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP42:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP42]], ptr [[DOTCAPTURE_EXPR_]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB69]], align 4 // CHECK15-NEXT: store i32 25, ptr [[DOTOMP_UB70]], align 4 -// CHECK15-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_LB69]], align 4 +// CHECK15-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_LB69]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP43]], ptr 
[[DOTOMP_IV71]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]] // CHECK15: omp.inner.for.cond73: -// CHECK15-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]] -// CHECK15-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_UB70]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_UB70]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]] // CHECK15-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]] // CHECK15: omp.inner.for.body75: -// CHECK15-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1 // CHECK15-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]] // CHECK15-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8 -// CHECK15-NEXT: store i8 [[CONV78]], ptr [[IT72]], align 1, !llvm.access.group [[ACC_GRP16]] -// CHECK15-NEXT: [[TMP47:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: store i8 [[CONV78]], ptr [[IT72]], align 1, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: [[TMP47:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1 -// CHECK15-NEXT: store i32 [[ADD79]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: store i32 [[ADD79]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[B]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP48:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: [[TMP48:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV80:%.*]] = fpext float [[TMP48]] to double // CHECK15-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00 // CHECK15-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float -// CHECK15-NEXT: store float [[CONV82]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: store float [[CONV82]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK15-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, ptr [[VLA]], i32 3 -// CHECK15-NEXT: [[TMP49:%.*]] = load float, ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: [[TMP49:%.*]] = load float, ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV84:%.*]] = fpext float [[TMP49]] to double // CHECK15-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00 // CHECK15-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float -// CHECK15-NEXT: store float [[CONV86]], ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: store float [[CONV86]], ptr [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK15-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[C]], i32 0, i32 1 // CHECK15-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX87]], 
i32 0, i32 2 -// CHECK15-NEXT: [[TMP50:%.*]] = load double, ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: [[TMP50:%.*]] = load double, ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00 -// CHECK15-NEXT: store double [[ADD89]], ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: store double [[ADD89]], ptr [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK15-NEXT: [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK15-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, ptr [[VLA1]], i32 [[TMP51]] // CHECK15-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX90]], i32 3 -// CHECK15-NEXT: [[TMP52:%.*]] = load double, ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: [[TMP52:%.*]] = load double, ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00 -// CHECK15-NEXT: store double [[ADD92]], ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: store double [[ADD92]], ptr [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK15-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP53:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: [[TMP53:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1 -// CHECK15-NEXT: store i64 [[ADD93]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: store i64 [[ADD93]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK15-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP54:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: [[TMP54:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV94:%.*]] = sext i8 [[TMP54]] to i32 // CHECK15-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 // CHECK15-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8 -// CHECK15-NEXT: store i8 [[CONV96]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: store i8 [[CONV96]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] // CHECK15: omp.body.continue97: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] // CHECK15: omp.inner.for.inc98: -// CHECK15-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: [[TMP55:%.*]] = load i32, ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1 -// CHECK15-NEXT: store i32 [[ADD99]], ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD99]], ptr [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP18:![0-9]+]] // CHECK15: omp.inner.for.end100: // CHECK15-NEXT: store i8 96, ptr [[IT72]], align 1 -// CHECK15-NEXT: [[TMP56:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP56:%.*]] = load i32, ptr 
[[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP57:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP57]]) // CHECK15-NEXT: ret i32 [[TMP56]] @@ -8573,27 +8573,27 @@ // CHECK15-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK15-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK15-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK15-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK15-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK15-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK15-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: ret i32 [[TMP8]] // // @@ -8614,81 +8614,81 @@ // CHECK15-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK15-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK15-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK15-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK15-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK15-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], 
align 2 // CHECK15-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60 // CHECK15-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 // CHECK15-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK15-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK15-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 -// CHECK15-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK15-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 -// CHECK15-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK15-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1 // CHECK15-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK15: omp_if.then: // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK15-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP20:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP2:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK15-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK15-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK15-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK15-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP19]] -// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK15-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP20]] +// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP10]] to double // CHECK15-NEXT: [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK15-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK15-NEXT: store double [[ADD3]], ptr [[A]], align 4, !nontemporal !20, !llvm.access.group [[ACC_GRP19]] +// CHECK15-NEXT: store double [[ADD3]], ptr [[A]], align 4, !nontemporal !21, !llvm.access.group [[ACC_GRP20]] // CHECK15-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP11:%.*]] = load double, ptr [[A4]], align 4, !nontemporal !20, !llvm.access.group [[ACC_GRP19]] +// CHECK15-NEXT: [[TMP11:%.*]] = load double, ptr [[A4]], align 4, !nontemporal !21, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00 -// CHECK15-NEXT: store double [[INC]], ptr [[A4]], align 4, !nontemporal 
!20, !llvm.access.group [[ACC_GRP19]] +// CHECK15-NEXT: store double [[INC]], ptr [[A4]], align 4, !nontemporal !21, !llvm.access.group [[ACC_GRP20]] // CHECK15-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK15-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP12]] // CHECK15-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK15-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP19]] +// CHECK15-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP20]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK15-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD7:%.*]] = add i64 [[TMP13]], 1 -// CHECK15-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] +// CHECK15-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP20]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: br label [[OMP_IF_END:%.*]] // CHECK15: omp_if.else: // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] // CHECK15: omp.inner.for.cond8: -// CHECK15-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK15-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK15-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP9:%.*]] = icmp ule i64 [[TMP14]], [[TMP15]] // CHECK15-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]] // CHECK15: omp.inner.for.body10: -// CHECK15-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK15-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL11:%.*]] = mul i64 [[TMP16]], 400 // CHECK15-NEXT: [[SUB12:%.*]] = sub i64 2000, [[MUL11]] // CHECK15-NEXT: store i64 [[SUB12]], ptr [[IT]], align 8 -// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[B]], align 4 +// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV13:%.*]] = sitofp i32 [[TMP17]] to double // CHECK15-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00 // CHECK15-NEXT: [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 // CHECK15-NEXT: store double [[ADD14]], ptr [[A15]], align 4 // CHECK15-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP18:%.*]] = load double, ptr [[A16]], align 4 +// CHECK15-NEXT: [[TMP18:%.*]] = load double, ptr [[A16]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[INC17:%.*]] = fadd double [[TMP18]], 1.000000e+00 // CHECK15-NEXT: store double [[INC17]], ptr [[A16]], align 4 // CHECK15-NEXT: [[CONV18:%.*]] = fptosi double [[INC17]] to i16 @@ -8700,10 +8700,10 @@ // CHECK15: omp.body.continue21: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC22:%.*]] // CHECK15: 
omp.inner.for.inc22: -// CHECK15-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK15-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD23:%.*]] = add i64 [[TMP20]], 1 // CHECK15-NEXT: store i64 [[ADD23]], ptr [[DOTOMP_IV]], align 8 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP23:![0-9]+]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP24:![0-9]+]] // CHECK15: omp.inner.for.end24: // CHECK15-NEXT: br label [[OMP_IF_END]] // CHECK15: omp_if.end: @@ -8711,9 +8711,9 @@ // CHECK15-NEXT: [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK15-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP21]] // CHECK15-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX25]], i32 1 -// CHECK15-NEXT: [[TMP22:%.*]] = load i16, ptr [[ARRAYIDX26]], align 2 +// CHECK15-NEXT: [[TMP22:%.*]] = load i16, ptr [[ARRAYIDX26]], align 2, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV27:%.*]] = sext i16 [[TMP22]] to i32 -// CHECK15-NEXT: [[TMP23:%.*]] = load i32, ptr [[B]], align 4 +// CHECK15-NEXT: [[TMP23:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP23]] // CHECK15-NEXT: [[TMP24:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP24]]) @@ -8737,7 +8737,7 @@ // CHECK15-NEXT: store i8 0, ptr [[AAA]], align 1 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 429496720, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: ret i32 [[TMP0]] // // @@ -8758,42 +8758,42 @@ // CHECK15-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK15-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK15-NEXT: store i64 6, ptr [[DOTOMP_UB]], align 8 -// CHECK15-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK15-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i64 [[TMP0]], ptr [[DOTOMP_IV]], align 8 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP25:![0-9]+]] -// CHECK15-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] -// CHECK15-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP25]] -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: store i64 [[ADD]], ptr [[I]], align 8, 
!llvm.access.group [[ACC_GRP26]] +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK15-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK15-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK15-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 // CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK15-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK15-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP26]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[B]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK15-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 -// CHECK15-NEXT: store i64 [[ADD5]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP25]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] +// CHECK15-NEXT: store i64 [[ADD5]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i64 11, ptr [[I]], align 8 -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: ret i32 [[TMP8]] // // @@ -8823,45 +8823,45 @@ // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11:![0-9]+]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br 
label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]] -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] -// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK17-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK17-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -8883,15 +8883,15 @@ // CHECK17-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[LIN]], ptr [[LIN_ADDR]], align 8 // CHECK17-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load i16, ptr 
[[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP2]], ptr [[LIN_CASTED]], align 4 -// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[LIN_CASTED]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[LIN_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..1, i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) // CHECK17-NEXT: ret void // @@ -8921,9 +8921,9 @@ // CHECK17-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[LIN]], ptr [[LIN_ADDR]], align 8 // CHECK17-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP0]], ptr [[DOTLINEAR_START]], align 4 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START1]], align 4 // CHECK17-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] // CHECK17-NEXT: store i64 [[CALL]], ptr [[DOTLINEAR_STEP]], align 8 @@ -8932,80 +8932,80 @@ // CHECK17-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK17-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK17-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK17-NEXT: 
[[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17:![0-9]+]] -// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP17]] +// CHECK17-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK17-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] +// CHECK17-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK17-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK17-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP17]] -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK17-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV:%.*]] = sext i32 [[TMP10]] to i64 -// CHECK17-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] -// CHECK17-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP17]] +// CHECK17-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL5:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK17-NEXT: [[ADD:%.*]] = add i64 [[CONV]], [[MUL5]] // CHECK17-NEXT: [[CONV6:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK17-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP17]] -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK17-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV7:%.*]] = sext i32 [[TMP13]] to i64 -// CHECK17-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] -// CHECK17-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP17]] +// CHECK17-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL8:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK17-NEXT: [[ADD9:%.*]] = add i64 [[CONV7]], [[MUL8]] // 
CHECK17-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32 -// CHECK17-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP17]] -// CHECK17-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP17]] +// CHECK17-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK17-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV11:%.*]] = sext i16 [[TMP16]] to i32 // CHECK17-NEXT: [[ADD12:%.*]] = add nsw i32 [[CONV11]], 1 // CHECK17-NEXT: [[CONV13:%.*]] = trunc i32 [[ADD12]] to i16 -// CHECK17-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP17]] +// CHECK17-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP18]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] +// CHECK17-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD14:%.*]] = add i64 [[TMP17]], 1 -// CHECK17-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] +// CHECK17-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK17-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: // CHECK17-NEXT: store i64 400, ptr [[IT]], align 8 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK17: .omp.final.done: -// CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK17-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK17: .omp.linear.pu: -// CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4 +// CHECK17-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP22]], ptr [[LIN_ADDR]], align 4 -// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP23]], ptr [[A_ADDR]], align 4 // CHECK17-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK17: .omp.linear.pu.done: @@ -9027,12 +9027,12 @@ // CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK17-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], 
align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i64 [[TMP1]], i64 [[TMP3]]) // CHECK17-NEXT: ret void // @@ -9060,54 +9060,54 @@ // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]] -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK17-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i16 -// 
CHECK17-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP20]] -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK17-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP21]] +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK17-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK17-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP20]] +// CHECK17-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK17-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV3:%.*]] = sext i16 [[TMP9]] to i32 // CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK17-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 -// CHECK17-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP20]] +// CHECK17-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP21]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK17-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK17-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -9143,19 +9143,19 @@ // CHECK17-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // 
CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK17-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK17-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK17-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..3, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i64 [[TMP11]]) // CHECK17-NEXT: ret void // @@ -9195,112 +9195,112 @@ // CHECK17-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK17-NEXT: store i32 25, ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK17: omp.dispatch.cond: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: 
[[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK17-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK17: omp.dispatch.body: // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]] -// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK17-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 -// CHECK17-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP23]] -// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK17-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK17-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK17-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK17-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float -// CHECK17-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, 
!llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK17-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK17-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK17-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK17-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float -// CHECK17-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK17-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK17-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i64 0, i64 2 -// CHECK17-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 -// CHECK17-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK17-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK17-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP23]] // CHECK17-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i64 3 -// CHECK17-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 -// CHECK17-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK17-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK17-NEXT: store i64 [[ADD20]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: store i64 [[ADD20]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK17-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK17-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK17-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 -// CHECK17-NEXT: store i8 [[CONV23]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: store i8 
[[CONV23]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 -// CHECK17-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK17: omp.dispatch.inc: -// CHECK17-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK17-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK17-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK17-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK17-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK17: omp.dispatch.end: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP10]]) -// CHECK17-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK17-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -9325,15 +9325,15 @@ // CHECK17-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK17-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK17-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i8, 
ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..4, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], ptr [[TMP0]]) // CHECK17-NEXT: ret void // @@ -9373,13 +9373,13 @@ // CHECK17-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK17-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..5, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]]) // CHECK17-NEXT: ret void // @@ -9408,68 +9408,68 @@ // CHECK17-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK17-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK17-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK17-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 // CHECK17-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP5]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label 
[[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK17-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26:![0-9]+]] -// CHECK17-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP26]] +// CHECK17-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] // CHECK17-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]] +// CHECK17-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 // CHECK17-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK17-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP26]] -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK17-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP27]] +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK17-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK17-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK17-NEXT: store double [[ADD]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP26]] +// CHECK17-NEXT: store double [[ADD]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK17-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 8, !llvm.access.group [[ACC_GRP26]] +// CHECK17-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 8, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 -// CHECK17-NEXT: store double [[INC]], ptr [[A4]], align 8, !llvm.access.group [[ACC_GRP26]] +// CHECK17-NEXT: store double [[INC]], ptr [[A4]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK17-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK17-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i64 [[TMP14]] // CHECK17-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK17-NEXT: store i16 
[[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP26]] +// CHECK17-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP27]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]] +// CHECK17-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 -// CHECK17-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] +// CHECK17-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK17-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -9491,12 +9491,12 @@ // CHECK17-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK17-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK17-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..6, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK17-NEXT: ret void // @@ -9527,57 +9527,57 @@ // CHECK17-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK17-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29:![0-9]+]] -// CHECK17-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP29]] +// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] +// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] -// CHECK17-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP29]] -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK17-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK17-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK17-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP29]] +// CHECK17-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group 
[[ACC_GRP30]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK17-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK17-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP29]] +// CHECK17-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]] // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK17-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK17-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP30]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] +// CHECK17-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP12]], 1 -// CHECK17-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] +// CHECK17-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK17-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -9613,45 +9613,45 @@ // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12:![0-9]+]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr 
[[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] -// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK19-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK19-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -9673,15 +9673,15 @@ // CHECK19-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[LIN]], ptr [[LIN_ADDR]], align 4 // CHECK19-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], 
align 2 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP2]], ptr [[LIN_CASTED]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[LIN_CASTED]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[LIN_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..1, i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) // CHECK19-NEXT: ret void // @@ -9711,9 +9711,9 @@ // CHECK19-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[LIN]], ptr [[LIN_ADDR]], align 4 // CHECK19-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP0]], ptr [[DOTLINEAR_START]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START1]], align 4 // CHECK19-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] // CHECK19-NEXT: store i64 [[CALL]], ptr [[DOTLINEAR_STEP]], align 8 @@ -9722,80 +9722,80 @@ // CHECK19-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK19-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK19-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK19-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK19-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK19-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store 
i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK19-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK19-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK19-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK19-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK19-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK19-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP18]] -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK19-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV:%.*]] = sext i32 [[TMP10]] to i64 -// CHECK19-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] -// CHECK19-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK19-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL5:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK19-NEXT: [[ADD:%.*]] = add i64 [[CONV]], [[MUL5]] // CHECK19-NEXT: [[CONV6:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK19-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK19-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV7:%.*]] = sext i32 [[TMP13]] to i64 -// CHECK19-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] -// CHECK19-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK19-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL8:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK19-NEXT: [[ADD9:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK19-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32 -// CHECK19-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK19-NEXT: 
[[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP18]] +// CHECK19-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK19-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV11:%.*]] = sext i16 [[TMP16]] to i32 // CHECK19-NEXT: [[ADD12:%.*]] = add nsw i32 [[CONV11]], 1 // CHECK19-NEXT: [[CONV13:%.*]] = trunc i32 [[ADD12]] to i16 -// CHECK19-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP18]] +// CHECK19-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP19]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK19-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD14:%.*]] = add i64 [[TMP17]], 1 -// CHECK19-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK19-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK19-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: // CHECK19-NEXT: store i64 400, ptr [[IT]], align 8 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK19: .omp.final.done: -// CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK19-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK19: .omp.linear.pu: -// CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4 +// CHECK19-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP22]], ptr [[LIN_ADDR]], align 4 -// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP23]], ptr [[A_ADDR]], align 4 // CHECK19-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK19: .omp.linear.pu.done: @@ -9817,12 +9817,12 @@ // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK19-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// 
CHECK19-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i32 [[TMP1]], i32 [[TMP3]]) // CHECK19-NEXT: ret void // @@ -9850,54 +9850,54 @@ // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]] -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK19-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i16 -// CHECK19-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP21]] -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group 
[[ACC_GRP21]] +// CHECK19-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP22]] +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK19-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK19-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP21]] +// CHECK19-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK19-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV3:%.*]] = sext i16 [[TMP9]] to i32 // CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK19-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 -// CHECK19-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP21]] +// CHECK19-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP22]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK19-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK19-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -9933,19 +9933,19 @@ // CHECK19-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr 
[[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..3, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i32 [[TMP11]]) // CHECK19-NEXT: ret void // @@ -9985,112 +9985,112 @@ // CHECK19-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK19-NEXT: store i32 25, ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK19: omp.dispatch.cond: -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: br label [[COND_END]] // 
CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK19: omp.dispatch.body: // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]] -// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK19-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK19-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 -// CHECK19-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP24]] -// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK19-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK19-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK19-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float -// CHECK19-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK19-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds 
float, ptr [[TMP2]], i32 3 -// CHECK19-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK19-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK19-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float -// CHECK19-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK19-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK19-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 -// CHECK19-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK19-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK19-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP23]] // CHECK19-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i32 3 -// CHECK19-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 -// CHECK19-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK19-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK19-NEXT: store i64 [[ADD20]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: store i64 [[ADD20]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK19-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK19-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK19-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 -// CHECK19-NEXT: store i8 [[CONV23]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: store i8 [[CONV23]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // 
CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 -// CHECK19-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK19: omp.dispatch.inc: -// CHECK19-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK19-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK19-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK19-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK19-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK19: omp.dispatch.end: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP10]]) -// CHECK19-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK19-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -10115,15 +10115,15 @@ // CHECK19-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK19-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK19-NEXT: 
[[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..4, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], ptr [[TMP0]]) // CHECK19-NEXT: ret void // @@ -10163,13 +10163,13 @@ // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK19-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..5, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]]) // CHECK19-NEXT: ret void // @@ -10198,68 +10198,68 @@ // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK19-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK19-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK19-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 // CHECK19-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP5]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK19-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP7:%.*]] 
= load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK19-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK19-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK19-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_IV]], align 8 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27:![0-9]+]] -// CHECK19-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP27]] +// CHECK19-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP28:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] // CHECK19-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]] +// CHECK19-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 // CHECK19-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK19-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP27]] -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK19-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP28]] +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK19-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK19-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK19-NEXT: store double [[ADD]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK19-NEXT: store double [[ADD]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP28]] // CHECK19-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK19-NEXT: [[TMP13:%.*]] = load double, ptr [[A4]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 -// CHECK19-NEXT: store double [[INC]], ptr [[A4]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK19-NEXT: store double [[INC]], ptr [[A4]], align 4, !llvm.access.group [[ACC_GRP28]] // CHECK19-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK19-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i32 [[TMP14]] // CHECK19-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK19-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP27]] +// CHECK19-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP28]] // CHECK19-NEXT: br label 
[[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]] +// CHECK19-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 -// CHECK19-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] +// CHECK19-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP28]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK19-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -10281,12 +10281,12 @@ // CHECK19-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK19-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..6, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK19-NEXT: ret void // @@ -10317,57 +10317,57 @@ // CHECK19-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK19-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK19-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK19-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK19-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK19-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30:![0-9]+]] -// CHECK19-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK19-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK19-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] -// CHECK19-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP30]] -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK19-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP31]] +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK19-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK19-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]] +// CHECK19-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group 
[[ACC_GRP31]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK19-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK19-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]] +// CHECK19-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP31]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK19-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK19-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP31]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK19-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP12]], 1 -// CHECK19-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] +// CHECK19-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP31]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK19-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -10403,45 +10403,45 @@ // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11:![0-9]+]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr 
[[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]] -// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] -// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK21-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK21-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -10463,15 +10463,15 @@ // CHECK21-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK21-NEXT: store i64 [[LIN]], ptr [[LIN_ADDR]], align 8 // CHECK21-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK21-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK21-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], 
align 2 -// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP2]], ptr [[LIN_CASTED]], align 4 -// CHECK21-NEXT: [[TMP3:%.*]] = load i64, ptr [[LIN_CASTED]], align 8 -// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK21-NEXT: [[TMP3:%.*]] = load i64, ptr [[LIN_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4 -// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..1, i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) // CHECK21-NEXT: ret void // @@ -10501,9 +10501,9 @@ // CHECK21-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK21-NEXT: store i64 [[LIN]], ptr [[LIN_ADDR]], align 8 // CHECK21-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK21-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK21-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP0]], ptr [[DOTLINEAR_START]], align 4 -// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START1]], align 4 // CHECK21-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] // CHECK21-NEXT: store i64 [[CALL]], ptr [[DOTLINEAR_STEP]], align 8 @@ -10512,80 +10512,80 @@ // CHECK21-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK21-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK21-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK21-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK21-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: 
store i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17:![0-9]+]] -// CHECK21-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP17]] +// CHECK21-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK21-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] +// CHECK21-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK21-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK21-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP17]] -// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK21-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV:%.*]] = sext i32 [[TMP10]] to i64 -// CHECK21-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] -// CHECK21-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP17]] +// CHECK21-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL5:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK21-NEXT: [[ADD:%.*]] = add i64 [[CONV]], [[MUL5]] // CHECK21-NEXT: [[CONV6:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK21-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP17]] -// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK21-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV7:%.*]] = sext i32 [[TMP13]] to i64 -// CHECK21-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] -// CHECK21-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP17]] +// CHECK21-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL8:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK21-NEXT: [[ADD9:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK21-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32 -// CHECK21-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP17]] -// CHECK21-NEXT: 
[[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP17]] +// CHECK21-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK21-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV11:%.*]] = sext i16 [[TMP16]] to i32 // CHECK21-NEXT: [[ADD12:%.*]] = add nsw i32 [[CONV11]], 1 // CHECK21-NEXT: [[CONV13:%.*]] = trunc i32 [[ADD12]] to i16 -// CHECK21-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP17]] +// CHECK21-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP18]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] +// CHECK21-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD14:%.*]] = add i64 [[TMP17]], 1 -// CHECK21-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] +// CHECK21-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK21-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: // CHECK21-NEXT: store i64 400, ptr [[IT]], align 8 // CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK21: .omp.final.done: -// CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK21-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK21: .omp.linear.pu: -// CHECK21-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4 +// CHECK21-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP22]], ptr [[LIN_ADDR]], align 4 -// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP23]], ptr [[A_ADDR]], align 4 // CHECK21-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK21: .omp.linear.pu.done: @@ -10607,12 +10607,12 @@ // CHECK21-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK21-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK21-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK21-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 
-// CHECK21-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK21-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK21-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i64 [[TMP1]], i64 [[TMP3]]) // CHECK21-NEXT: ret void // @@ -10640,54 +10640,54 @@ // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]] -// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK21-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i16 -// CHECK21-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP20]] -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, 
!llvm.access.group [[ACC_GRP20]] +// CHECK21-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP21]] +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK21-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK21-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP20]] +// CHECK21-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK21-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV3:%.*]] = sext i16 [[TMP9]] to i32 // CHECK21-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK21-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 -// CHECK21-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP20]] +// CHECK21-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP21]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK21-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK21-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -10723,19 +10723,19 @@ // CHECK21-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK21-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK21-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK21-NEXT: [[TMP8:%.*]] 
= load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK21-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK21-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK21-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK21-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..3, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i64 [[TMP11]]) // CHECK21-NEXT: ret void // @@ -10775,112 +10775,112 @@ // CHECK21-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK21-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK21-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK21-NEXT: store i32 25, ptr [[DOTOMP_UB]], align 4 // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK21-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK21: omp.dispatch.cond: -// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br 
label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK21-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK21: omp.dispatch.body: // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]] -// CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK21-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK21-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK21-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 -// CHECK21-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP23]] -// CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK21-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK21-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK21-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK21-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float -// CHECK21-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK21-NEXT: [[ARRAYIDX10:%.*]] = 
getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK21-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK21-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK21-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float -// CHECK21-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK21-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK21-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i64 0, i64 2 -// CHECK21-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 -// CHECK21-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK21-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK21-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP23]] // CHECK21-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i64 3 -// CHECK21-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 -// CHECK21-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK21-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK21-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK21-NEXT: store i64 [[ADD20]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: store i64 [[ADD20]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK21-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK21-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK21-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK21-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 -// CHECK21-NEXT: store i8 [[CONV23]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: store i8 [[CONV23]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label 
[[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK21-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 -// CHECK21-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK21: omp.dispatch.inc: -// CHECK21-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK21-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK21-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK21-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK21-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK21-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK21-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK21-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK21: omp.dispatch.end: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP10]]) -// CHECK21-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK21-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -10905,15 +10905,15 @@ // CHECK21-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK21-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK21-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK21-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK21-NEXT: [[TMP6:%.*]] = load i64, ptr 
[[AAA_CASTED]], align 8 +// CHECK21-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..4, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], ptr [[TMP0]]) // CHECK21-NEXT: ret void // @@ -10959,19 +10959,19 @@ // CHECK21-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK21-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK21-NEXT: [[TMP3:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK21-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP3:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP4:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4 -// CHECK21-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8 -// CHECK21-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK21-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 // CHECK21-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK21-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK21-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK21-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK21-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK21-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK21: omp_if.then: @@ -11014,104 +11014,104 @@ // CHECK21-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK21-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK21-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK21-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 // CHECK21-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK21-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// 
CHECK21-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1 // CHECK21-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK21: omp_if.then: // CHECK21-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP6]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK21-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK21-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK21-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK21-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK21-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK21-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i64 [[TMP9]], ptr [[DOTOMP_IV]], align 8 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26:![0-9]+]] -// CHECK21-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP26]] +// CHECK21-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]] // CHECK21-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]] +// CHECK21-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL:%.*]] = mul i64 [[TMP12]], 400 // CHECK21-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK21-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP26]] -// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK21-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP27]] +// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP13]] to double // CHECK21-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK21-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK21-NEXT: store double [[ADD]], ptr [[A]], align 8, !nontemporal !27, !llvm.access.group [[ACC_GRP26]] +// 
CHECK21-NEXT: store double [[ADD]], ptr [[A]], align 8, !nontemporal !28, !llvm.access.group [[ACC_GRP27]] // CHECK21-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK21-NEXT: [[TMP14:%.*]] = load double, ptr [[A4]], align 8, !nontemporal !27, !llvm.access.group [[ACC_GRP26]] +// CHECK21-NEXT: [[TMP14:%.*]] = load double, ptr [[A4]], align 8, !nontemporal !28, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00 -// CHECK21-NEXT: store double [[INC]], ptr [[A4]], align 8, !nontemporal !27, !llvm.access.group [[ACC_GRP26]] +// CHECK21-NEXT: store double [[INC]], ptr [[A4]], align 8, !nontemporal !28, !llvm.access.group [[ACC_GRP27]] // CHECK21-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK21-NEXT: [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i64 [[TMP15]] // CHECK21-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK21-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP26]] +// CHECK21-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP27]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]] +// CHECK21-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD7:%.*]] = add i64 [[TMP16]], 1 -// CHECK21-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]] -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] +// CHECK21-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_IF_END:%.*]] // CHECK21: omp_if.else: // CHECK21-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 +// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP18]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK21-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK21-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP8:%.*]] = icmp ugt i64 [[TMP19]], 3 // CHECK21-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]] // CHECK21: cond.true9: // CHECK21-NEXT: br label [[COND_END11:%.*]] // CHECK21: cond.false10: -// CHECK21-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK21-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br label [[COND_END11]] // CHECK21: cond.end11: // CHECK21-NEXT: [[COND12:%.*]] = phi i64 [ 3, [[COND_TRUE9]] ], [ [[TMP20]], [[COND_FALSE10]] ] // CHECK21-NEXT: store i64 [[COND12]], ptr [[DOTOMP_UB]], align 8 -// CHECK21-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK21-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef 
[[NOUNDEF11]] // CHECK21-NEXT: store i64 [[TMP21]], ptr [[DOTOMP_IV]], align 8 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]] // CHECK21: omp.inner.for.cond13: -// CHECK21-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK21-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK21-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP14:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] // CHECK21-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END29:%.*]] // CHECK21: omp.inner.for.body15: -// CHECK21-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK21-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL16:%.*]] = mul i64 [[TMP24]], 400 // CHECK21-NEXT: [[SUB17:%.*]] = sub i64 2000, [[MUL16]] // CHECK21-NEXT: store i64 [[SUB17]], ptr [[IT]], align 8 -// CHECK21-NEXT: [[TMP25:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK21-NEXT: [[TMP25:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV18:%.*]] = sitofp i32 [[TMP25]] to double // CHECK21-NEXT: [[ADD19:%.*]] = fadd double [[CONV18]], 1.500000e+00 // CHECK21-NEXT: [[A20:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 // CHECK21-NEXT: store double [[ADD19]], ptr [[A20]], align 8 // CHECK21-NEXT: [[A21:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK21-NEXT: [[TMP26:%.*]] = load double, ptr [[A21]], align 8 +// CHECK21-NEXT: [[TMP26:%.*]] = load double, ptr [[A21]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[INC22:%.*]] = fadd double [[TMP26]], 1.000000e+00 // CHECK21-NEXT: store double [[INC22]], ptr [[A21]], align 8 // CHECK21-NEXT: [[CONV23:%.*]] = fptosi double [[INC22]] to i16 @@ -11123,19 +11123,19 @@ // CHECK21: omp.body.continue26: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] // CHECK21: omp.inner.for.inc27: -// CHECK21-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK21-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD28:%.*]] = add i64 [[TMP28]], 1 // CHECK21-NEXT: store i64 [[ADD28]], ptr [[DOTOMP_IV]], align 8 -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP30:![0-9]+]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP31:![0-9]+]] // CHECK21: omp.inner.for.end29: // CHECK21-NEXT: br label [[OMP_IF_END]] // CHECK21: omp_if.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4 +// CHECK21-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]]) -// CHECK21-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 // CHECK21-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -11157,12 +11157,12 @@ // CHECK21-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK21-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // 
CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK21-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..6, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK21-NEXT: ret void // @@ -11193,57 +11193,57 @@ // CHECK21-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK21-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK21-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK21-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP32:![0-9]+]] -// CHECK21-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP32]] +// CHECK21-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP33:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP32]] +// CHECK21-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group 
[[ACC_GRP33]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] -// CHECK21-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP32]] -// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK21-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP33]] +// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK21-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK21-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP32]] +// CHECK21-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK21-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK21-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK21-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK21-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP32]] +// CHECK21-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK21-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK21-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP32]] +// CHECK21-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP12]], 1 -// CHECK21-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP32]] -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] +// CHECK21-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP33]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK21-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -11279,45 +11279,45 @@ // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr 
[[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12:![0-9]+]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] -// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK23-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label 
[[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK23-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: @@ -11339,15 +11339,15 @@ // CHECK23-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK23-NEXT: store i32 [[LIN]], ptr [[LIN_ADDR]], align 4 // CHECK23-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK23-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK23-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP2]], ptr [[LIN_CASTED]], align 4 -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[LIN_CASTED]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[LIN_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP4]], ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..1, i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) // CHECK23-NEXT: ret void // @@ -11377,9 +11377,9 @@ // CHECK23-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK23-NEXT: store i32 [[LIN]], ptr [[LIN_ADDR]], align 4 // CHECK23-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK23-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4 +// CHECK23-NEXT: [[TMP0:%.*]] = load i32, ptr [[LIN_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP0]], ptr [[DOTLINEAR_START]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP1]], ptr [[DOTLINEAR_START1]], align 4 // CHECK23-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] // CHECK23-NEXT: store i64 [[CALL]], ptr [[DOTLINEAR_STEP]], align 8 @@ -11388,80 +11388,80 @@ // CHECK23-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_barrier(ptr @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK23-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK23-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK23-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK23-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK23-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK23-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK23-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i64 [[TMP6]], ptr [[DOTOMP_IV]], align 8 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK23-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK23-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK23-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK23-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, 
!llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK23-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK23-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP18]] -// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK23-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV:%.*]] = sext i32 [[TMP10]] to i64 -// CHECK23-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] -// CHECK23-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK23-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL5:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK23-NEXT: [[ADD:%.*]] = add i64 [[CONV]], [[MUL5]] // CHECK23-NEXT: [[CONV6:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK23-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK23-NEXT: store i32 [[CONV6]], ptr [[LIN2]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV7:%.*]] = sext i32 [[TMP13]] to i64 -// CHECK23-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] -// CHECK23-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK23-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL8:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK23-NEXT: [[ADD9:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK23-NEXT: [[CONV10:%.*]] = trunc i64 [[ADD9]] to i32 -// CHECK23-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK23-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP18]] +// CHECK23-NEXT: store i32 [[CONV10]], ptr [[A3]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK23-NEXT: [[TMP16:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV11:%.*]] = sext i16 [[TMP16]] to i32 // CHECK23-NEXT: [[ADD12:%.*]] = add nsw i32 [[CONV11]], 1 // CHECK23-NEXT: [[CONV13:%.*]] = trunc i32 [[ADD12]] to i16 -// CHECK23-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP18]] +// CHECK23-NEXT: store i16 [[CONV13]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP19]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK23-NEXT: 
[[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD14:%.*]] = add i64 [[TMP17]], 1 -// CHECK23-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK23-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK23-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: // CHECK23-NEXT: store i64 400, ptr [[IT]], align 8 // CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK23: .omp.final.done: -// CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK23-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK23: .omp.linear.pu: -// CHECK23-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4 +// CHECK23-NEXT: [[TMP22:%.*]] = load i32, ptr [[LIN2]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP22]], ptr [[LIN_ADDR]], align 4 -// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4 +// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[A3]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP23]], ptr [[A_ADDR]], align 4 // CHECK23-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK23: .omp.linear.pu.done: @@ -11483,12 +11483,12 @@ // CHECK23-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK23-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK23-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK23-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i32 [[TMP1]], i32 [[TMP3]]) // CHECK23-NEXT: ret void // @@ -11516,54 +11516,54 @@ // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]] -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK23-NEXT: [[CONV:%.*]] = trunc i32 [[ADD]] to i16 -// CHECK23-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP21]] -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK23-NEXT: store i16 [[CONV]], ptr [[IT]], align 2, !llvm.access.group [[ACC_GRP22]] +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK23-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK23-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP21]] +// CHECK23-NEXT: store i32 [[ADD2]], 
ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK23-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV3:%.*]] = sext i16 [[TMP9]] to i32 // CHECK23-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK23-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 -// CHECK23-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP21]] +// CHECK23-NEXT: store i16 [[CONV5]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP22]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK23-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK23-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: @@ -11599,19 +11599,19 @@ // CHECK23-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK23-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK23-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP10]], ptr 
[[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..3, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i32 [[TMP11]]) // CHECK23-NEXT: ret void // @@ -11651,112 +11651,112 @@ // CHECK23-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK23-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK23-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK23-NEXT: store i32 25, ptr [[DOTOMP_UB]], align 4 // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK23-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK23: omp.dispatch.cond: -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: 
[[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK23-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK23: omp.dispatch.body: // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]] -// CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK23-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK23-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK23-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 -// CHECK23-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP24]] -// CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: store i8 [[CONV]], ptr [[IT]], align 1, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK23-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK23-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK23-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK23-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float -// CHECK23-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK23-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK23-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK23-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK23-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float -// CHECK23-NEXT: store 
float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK23-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK23-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i32 0, i32 2 -// CHECK23-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 -// CHECK23-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK23-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK23-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP23]] // CHECK23-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i32 3 -// CHECK23-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 -// CHECK23-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK23-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK23-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK23-NEXT: store i64 [[ADD20]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: store i64 [[ADD20]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK23-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK23-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK23-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK23-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 -// CHECK23-NEXT: store i8 [[CONV23]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: store i8 [[CONV23]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK23-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 -// CHECK23-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP25:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK23: omp.dispatch.inc: -// CHECK23-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK23-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK23-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK23-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK23-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK23-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK23-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK23-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK23: omp.dispatch.end: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP10]]) -// CHECK23-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK23-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: @@ -11781,15 +11781,15 @@ // CHECK23-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK23-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined..4, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], ptr [[TMP0]]) // CHECK23-NEXT: ret void // @@ -11835,19 +11835,19 @@ // CHECK23-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK23-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP4:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4 -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[B_CASTED]], align 4 -// CHECK23-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 // CHECK23-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK23-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK23-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK23-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK23: omp_if.then: @@ -11890,104 +11890,104 @@ // CHECK23-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK23-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK23-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK23-NEXT: store i64 3, ptr [[DOTOMP_UB]], align 8 // CHECK23-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK23-NEXT: [[TMP4:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1 // CHECK23-NEXT: 
br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK23: omp_if.then: // CHECK23-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP6]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK23-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK23-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK23-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK23-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK23-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK23-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i64 [[TMP9]], ptr [[DOTOMP_IV]], align 8 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27:![0-9]+]] -// CHECK23-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP27]] +// CHECK23-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP28:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]] // CHECK23-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]] +// CHECK23-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL:%.*]] = mul i64 [[TMP12]], 400 // CHECK23-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] -// CHECK23-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP27]] -// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK23-NEXT: store i64 [[SUB]], ptr [[IT]], align 8, !llvm.access.group [[ACC_GRP28]] +// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP13]] to double // CHECK23-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK23-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK23-NEXT: store double [[ADD]], ptr [[A]], align 4, !nontemporal !28, !llvm.access.group [[ACC_GRP27]] +// CHECK23-NEXT: store double [[ADD]], ptr [[A]], align 4, !nontemporal !29, !llvm.access.group [[ACC_GRP28]] // CHECK23-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], 
ptr [[TMP0]], i32 0, i32 0 -// CHECK23-NEXT: [[TMP14:%.*]] = load double, ptr [[A4]], align 4, !nontemporal !28, !llvm.access.group [[ACC_GRP27]] +// CHECK23-NEXT: [[TMP14:%.*]] = load double, ptr [[A4]], align 4, !nontemporal !29, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00 -// CHECK23-NEXT: store double [[INC]], ptr [[A4]], align 4, !nontemporal !28, !llvm.access.group [[ACC_GRP27]] +// CHECK23-NEXT: store double [[INC]], ptr [[A4]], align 4, !nontemporal !29, !llvm.access.group [[ACC_GRP28]] // CHECK23-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK23-NEXT: [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i32 [[TMP15]] // CHECK23-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK23-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP27]] +// CHECK23-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP28]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]] +// CHECK23-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD7:%.*]] = add i64 [[TMP16]], 1 -// CHECK23-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]] -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] +// CHECK23-NEXT: store i64 [[ADD7]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP28]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_IF_END:%.*]] // CHECK23: omp_if.else: // CHECK23-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 +// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_for_static_init_8u(ptr @[[GLOB1]], i32 [[TMP18]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK23-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK23-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP8:%.*]] = icmp ugt i64 [[TMP19]], 3 // CHECK23-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]] // CHECK23: cond.true9: // CHECK23-NEXT: br label [[COND_END11:%.*]] // CHECK23: cond.false10: -// CHECK23-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK23-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END11]] // CHECK23: cond.end11: // CHECK23-NEXT: [[COND12:%.*]] = phi i64 [ 3, [[COND_TRUE9]] ], [ [[TMP20]], [[COND_FALSE10]] ] // CHECK23-NEXT: store i64 [[COND12]], ptr [[DOTOMP_UB]], align 8 -// CHECK23-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK23-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i64 [[TMP21]], ptr [[DOTOMP_IV]], align 8 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND13:%.*]] // CHECK23: omp.inner.for.cond13: -// 
CHECK23-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK23-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK23-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP14:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] // CHECK23-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY15:%.*]], label [[OMP_INNER_FOR_END29:%.*]] // CHECK23: omp.inner.for.body15: -// CHECK23-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK23-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL16:%.*]] = mul i64 [[TMP24]], 400 // CHECK23-NEXT: [[SUB17:%.*]] = sub i64 2000, [[MUL16]] // CHECK23-NEXT: store i64 [[SUB17]], ptr [[IT]], align 8 -// CHECK23-NEXT: [[TMP25:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK23-NEXT: [[TMP25:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV18:%.*]] = sitofp i32 [[TMP25]] to double // CHECK23-NEXT: [[ADD19:%.*]] = fadd double [[CONV18]], 1.500000e+00 // CHECK23-NEXT: [[A20:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 // CHECK23-NEXT: store double [[ADD19]], ptr [[A20]], align 4 // CHECK23-NEXT: [[A21:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK23-NEXT: [[TMP26:%.*]] = load double, ptr [[A21]], align 4 +// CHECK23-NEXT: [[TMP26:%.*]] = load double, ptr [[A21]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[INC22:%.*]] = fadd double [[TMP26]], 1.000000e+00 // CHECK23-NEXT: store double [[INC22]], ptr [[A21]], align 4 // CHECK23-NEXT: [[CONV23:%.*]] = fptosi double [[INC22]] to i16 @@ -11999,19 +11999,19 @@ // CHECK23: omp.body.continue26: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] // CHECK23: omp.inner.for.inc27: -// CHECK23-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 +// CHECK23-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD28:%.*]] = add i64 [[TMP28]], 1 // CHECK23-NEXT: store i64 [[ADD28]], ptr [[DOTOMP_IV]], align 8 -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP31:![0-9]+]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND13]], !llvm.loop [[LOOP32:![0-9]+]] // CHECK23: omp.inner.for.end29: // CHECK23-NEXT: br label [[OMP_IF_END]] // CHECK23: omp_if.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4 +// CHECK23-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]]) -// CHECK23-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 // CHECK23-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: @@ -12033,12 +12033,12 @@ // CHECK23-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK23-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr 
[[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..6, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK23-NEXT: ret void // @@ -12069,57 +12069,57 @@ // CHECK23-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK23-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK23-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK23-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK23-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK23-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK23-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i64 [[TMP5]], ptr [[DOTOMP_IV]], align 8 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP33:![0-9]+]] -// CHECK23-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP33]] +// CHECK23-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP34:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP33]] +// CHECK23-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] -// CHECK23-NEXT: store i64 [[ADD]], 
ptr [[I]], align 8, !llvm.access.group [[ACC_GRP33]] -// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK23-NEXT: store i64 [[ADD]], ptr [[I]], align 8, !llvm.access.group [[ACC_GRP34]] +// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK23-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK23-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] +// CHECK23-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK23-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK23-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK23-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK23-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] +// CHECK23-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]] // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK23-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK23-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP33]] +// CHECK23-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP12]], 1 -// CHECK23-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP33]] -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] +// CHECK23-NEXT: store i64 [[ADD6]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP34]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK23-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: diff --git a/clang/test/OpenMP/target_parallel_reduction_task_codegen.cpp b/clang/test/OpenMP/target_parallel_reduction_task_codegen.cpp --- a/clang/test/OpenMP/target_parallel_reduction_task_codegen.cpp +++ b/clang/test/OpenMP/target_parallel_reduction_task_codegen.cpp @@ -42,7 +42,7 @@ // CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[ARGC]], ptr 
[[ARGC_ADDR]], align 4 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l14(ptr [[ARGC_ADDR]], ptr [[TMP0]]) #[[ATTR6:[0-9]+]] // CHECK1-NEXT: ret i32 0 // @@ -55,7 +55,7 @@ // CHECK1-NEXT: store ptr [[ARGC]], ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1:[0-9]+]], i32 2, ptr @.omp_outlined., ptr [[TMP0]], ptr [[TMP1]]) // CHECK1-NEXT: ret void // @@ -84,14 +84,14 @@ // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP1]], i64 0 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 0 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64 // CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP4]] -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds ptr, ptr [[TMP5]], i64 9 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[ARRAYIDX3]], align 8 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[LB_ADD_LEN]] @@ -127,155 +127,155 @@ // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 0 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 -// CHECK1-NEXT: store i64 4, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = 
getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 -// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP30]], i8 0, i64 4, i1 false) +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 +// CHECK1-NEXT: store i64 4, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 +// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 4, i1 false) // CHECK1-NEXT: [[DOTRD_INPUT_GEP_6:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds ptr, ptr [[TMP33]], i64 0 -// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARRAYIDX7]], align 8 -// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP34]], i64 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP36]] -// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr [[TMP37]], i64 9 -// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[ARRAYIDX10]], align 8 -// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[TMP38]], i64 [[LB_ADD_LEN9]] -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[ARRAYIDX11]] to i64 -// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[ARRAYIDX8]] to i64 -// CHECK1-NEXT: [[TMP42:%.*]] = sub i64 [[TMP40]], [[TMP41]] -// CHECK1-NEXT: [[TMP43:%.*]] = sdiv exact i64 [[TMP42]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP44:%.*]] = add nuw i64 [[TMP43]], 1 -// CHECK1-NEXT: [[TMP45:%.*]] = mul nuw i64 [[TMP44]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 2 -// CHECK1-NEXT: store i64 [[TMP45]], ptr [[TMP46]], align 8 -// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP47]], align 8 -// 
CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP48]], align 8 -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP49]], align 8 -// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 6 -// CHECK1-NEXT: store i32 1, ptr [[TMP50]], align 8 -// CHECK1-NEXT: [[TMP51:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[TMP51]], align 4 -// CHECK1-NEXT: [[TMP54:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP52]], i32 0, i32 2, ptr [[DOTRD_INPUT_]]) -// CHECK1-NEXT: store ptr [[TMP54]], ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP55]], align 8 -// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP56]], align 8 -// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP58:%.*]] = load ptr, ptr [[TMP]], align 8 -// CHECK1-NEXT: store ptr [[TMP58]], ptr [[TMP57]], align 8 -// CHECK1-NEXT: [[TMP59:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP60:%.*]] = load i32, ptr [[TMP59]], align 4 -// CHECK1-NEXT: [[TMP61:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP60]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) -// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP61]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP63]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP65:%.*]] = load ptr, ptr [[TMP64]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP65]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) -// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP61]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP67]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP69:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: store ptr [[TMP69]], ptr [[TMP68]], align 8 -// CHECK1-NEXT: [[TMP70:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP71:%.*]] = load i32, ptr [[TMP70]], align 4 -// CHECK1-NEXT: [[TMP72:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP71]], ptr [[TMP61]]) +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds ptr, ptr [[TMP30]], i64 0 +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[ARRAYIDX7]], align 8 +// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP31]], i64 0 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP33:%.*]] = sext i32 [[TMP32]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP33]] +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARGV_ADDR]], 
align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr [[TMP34]], i64 9 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[ARRAYIDX10]], align 8 +// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[TMP35]], i64 [[LB_ADD_LEN9]] +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP37:%.*]] = ptrtoint ptr [[ARRAYIDX11]] to i64 +// CHECK1-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[ARRAYIDX8]] to i64 +// CHECK1-NEXT: [[TMP39:%.*]] = sub i64 [[TMP37]], [[TMP38]] +// CHECK1-NEXT: [[TMP40:%.*]] = sdiv exact i64 [[TMP39]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP41:%.*]] = add nuw i64 [[TMP40]], 1 +// CHECK1-NEXT: [[TMP42:%.*]] = mul nuw i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 2 +// CHECK1-NEXT: store i64 [[TMP42]], ptr [[TMP43]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP44]], align 8 +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 6 +// CHECK1-NEXT: store i32 1, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP48]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP50:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP49]], i32 0, i32 2, ptr [[DOTRD_INPUT_]]) +// CHECK1-NEXT: store ptr [[TMP50]], ptr [[DOTTASK_RED_]], align 8 +// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP51]], align 8 +// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP52]], align 8 +// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP54:%.*]] = load ptr, ptr [[TMP]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP54]], ptr [[TMP53]], align 8 +// CHECK1-NEXT: [[TMP55:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[TMP55]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP57:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP56]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
+// CHECK1-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP57]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP58]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP60:%.*]] = load ptr, ptr [[TMP59]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP60]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) +// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP57]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP61]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP63:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP63]], ptr [[TMP62]], align 8 +// CHECK1-NEXT: [[TMP64:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP65:%.*]] = load i32, ptr [[TMP64]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP66:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP65]], ptr [[TMP57]]) +// CHECK1-NEXT: [[TMP67:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP68:%.*]] = load i32, ptr [[TMP67]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP68]], i32 0) +// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP69]], align 8 +// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP70]], align 8 +// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 +// CHECK1-NEXT: [[TMP72:%.*]] = inttoptr i64 [[TMP11]] to ptr +// CHECK1-NEXT: store ptr [[TMP72]], ptr [[TMP71]], align 8 // CHECK1-NEXT: [[TMP73:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP74:%.*]] = load i32, ptr [[TMP73]], align 4 -// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP74]], i32 0) -// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP75]], align 8 -// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP77]], align 8 -// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP79:%.*]] = inttoptr i64 [[TMP11]] to ptr -// CHECK1-NEXT: store ptr [[TMP79]], ptr [[TMP78]], align 8 -// CHECK1-NEXT: [[TMP80:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP80]], align 4 -// CHECK1-NEXT: [[TMP83:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB2:[0-9]+]], i32 [[TMP81]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP83]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP74:%.*]] = load i32, ptr [[TMP73]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP75:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB2:[0-9]+]], i32 [[TMP74]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr 
@.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP75]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP84:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP84]], [[TMP85]] +// CHECK1-NEXT: [[TMP76:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP77:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP76]], [[TMP77]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP86:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP86]] +// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP78]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE18:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST12:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT16:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP87:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP87]] to i32 -// CHECK1-NEXT: [[TMP88:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP88]] to i32 +// CHECK1-NEXT: [[TMP79:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP79]] to i32 +// CHECK1-NEXT: [[TMP80:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP80]] to i32 // CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV]], [[CONV13]] // CHECK1-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 // CHECK1-NEXT: store i8 [[CONV15]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT16]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST12]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE17:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT16]], [[TMP86]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE17:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT16]], [[TMP78]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_DONE18]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done18: -// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB2]], i32 [[TMP81]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB2]], i32 [[TMP74]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP89:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP90:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP89]] monotonic, align 4 -// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr i8, 
ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY19:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP91]] +// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP82:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP81]] monotonic, align 4 +// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY19:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP83]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY19]], label [[OMP_ARRAYCPY_DONE32:%.*]], label [[OMP_ARRAYCPY_BODY20:%.*]] // CHECK1: omp.arraycpy.body20: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST21:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT30:%.*]], [[ATOMIC_EXIT:%.*]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST22:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT29:%.*]], [[ATOMIC_EXIT]] ] -// CHECK1-NEXT: [[TMP92:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1 -// CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP92]] to i32 +// CHECK1-NEXT: [[TMP84:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP84]] to i32 // CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]] monotonic, align 1 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] // CHECK1: atomic_cont: -// CHECK1-NEXT: [[TMP93:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY20]] ], [ [[TMP98:%.*]], [[ATOMIC_CONT]] ] -// CHECK1-NEXT: store i8 [[TMP93]], ptr [[_TMP24]], align 1 -// CHECK1-NEXT: [[TMP94:%.*]] = load i8, ptr [[_TMP24]], align 1 -// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP94]] to i32 -// CHECK1-NEXT: [[TMP95:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1 -// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP95]] to i32 +// CHECK1-NEXT: [[TMP85:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY20]] ], [ [[TMP90:%.*]], [[ATOMIC_CONT]] ] +// CHECK1-NEXT: store i8 [[TMP85]], ptr [[_TMP24]], align 1 +// CHECK1-NEXT: [[TMP86:%.*]] = load i8, ptr [[_TMP24]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV25:%.*]] = sext i8 [[TMP86]] to i32 +// CHECK1-NEXT: [[TMP87:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP87]] to i32 // CHECK1-NEXT: [[ADD27:%.*]] = add nsw i32 [[CONV25]], [[CONV26]] // CHECK1-NEXT: [[CONV28:%.*]] = trunc i32 [[ADD27]] to i8 // CHECK1-NEXT: store i8 [[CONV28]], ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP96:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP97:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i8 [[TMP93]], i8 [[TMP96]] monotonic monotonic, align 1 -// CHECK1-NEXT: [[TMP98]] = extractvalue { i8, i1 } [[TMP97]], 0 -// CHECK1-NEXT: [[TMP99:%.*]] = extractvalue { i8, i1 } [[TMP97]], 1 -// CHECK1-NEXT: br i1 [[TMP99]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] +// CHECK1-NEXT: [[TMP88:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 +// CHECK1-NEXT: [[TMP89:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST22]], i8 [[TMP85]], i8 [[TMP88]] monotonic monotonic, align 1 +// CHECK1-NEXT: [[TMP90]] = extractvalue { i8, i1 } [[TMP89]], 0 +// CHECK1-NEXT: [[TMP91:%.*]] = extractvalue { i8, i1 } [[TMP89]], 1 +// CHECK1-NEXT: br i1 [[TMP91]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] // CHECK1: atomic_exit: // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT29]] = getelementptr i8, ptr 
[[OMP_ARRAYCPY_DESTELEMENTPAST22]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT30]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST21]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE31:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT29]], [[TMP91]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE31:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT29]], [[TMP83]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE31]], label [[OMP_ARRAYCPY_DONE32]], label [[OMP_ARRAYCPY_BODY20]] // CHECK1: omp.arraycpy.done32: // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: -// CHECK1-NEXT: [[TMP100:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP100]]) +// CHECK1-NEXT: [[TMP92:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP92]]) // CHECK1-NEXT: ret void // // @@ -286,8 +286,8 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -298,12 +298,12 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP5]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -316,7 +316,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP4]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP2]], [[TMP5]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -338,7 +338,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: 
[[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]] @@ -347,9 +347,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP8]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8 @@ -392,65 +392,65 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: call void [[TMP13]](ptr [[TMP14]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: 
[[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: [[TMP22:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP19]], ptr [[TMP18]]) -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]] -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP31]], i64 9 -// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 -// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i64 [[LB_ADD_LEN_I]] -// CHECK1-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 -// CHECK1-NEXT: [[TMP34:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP35:%.*]] = sub i64 [[TMP33]], [[TMP34]] -// CHECK1-NEXT: [[TMP36:%.*]] = sdiv exact i64 [[TMP35]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP37:%.*]] = add nuw i64 [[TMP36]], 1 -// CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: store i64 [[TMP37]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !12 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP39]], ptr [[TMP25]]) -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 8 -// CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[TMP43]] to i64 -// CHECK1-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP46:%.*]] = sub i64 [[TMP44]], [[TMP45]] -// CHECK1-NEXT: [[TMP47:%.*]] = sdiv exact i64 [[TMP46]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr i8, ptr [[TMP40]], i64 [[TMP47]] -// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP48]], ptr [[TMP4_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias 
!13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP15]], ptr [[TMP14]]) +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP24:%.*]] = sext i32 [[TMP23]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP24]] +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP26]], i64 9 +// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 +// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP27]], i64 [[LB_ADD_LEN_I]] +// CHECK1-NEXT: [[TMP28:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 +// CHECK1-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP30:%.*]] = sub i64 [[TMP28]], [[TMP29]] +// CHECK1-NEXT: [[TMP31:%.*]] = sdiv exact i64 [[TMP30]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP32:%.*]] = add nuw i64 [[TMP31]], 1 +// CHECK1-NEXT: [[TMP33:%.*]] = mul nuw i64 [[TMP32]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: store i64 [[TMP32]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !13 +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP34]], ptr [[TMP20]]) +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP37]], align 8 +// CHECK1-NEXT: 
[[TMP39:%.*]] = ptrtoint ptr [[TMP38]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP41:%.*]] = sub i64 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: [[TMP42:%.*]] = sdiv exact i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP35]], i64 [[TMP42]] +// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP43]], ptr [[TMP4_I]], align 8, !noalias !13 // CHECK1-NEXT: ret i32 0 // // @@ -462,38 +462,38 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 // CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP15]], i64 [[TMP18]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP15]], [[TMP21]] +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[TMP14]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP11]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE5:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: -// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = 
phi ptr [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP15]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP23]] to i32 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP18]] to i32 +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP19]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i8 // CHECK1-NEXT: store i8 [[CONV4]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done5: // CHECK1-NEXT: ret void diff --git a/clang/test/OpenMP/target_teams_codegen.cpp b/clang/test/OpenMP/target_teams_codegen.cpp --- a/clang/test/OpenMP/target_teams_codegen.cpp +++ b/clang/test/OpenMP/target_teams_codegen.cpp @@ -351,30 +351,30 @@ // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF13:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 // CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]] // CHECK1-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8 // CHECK1-NEXT: store i64 [[TMP5]], ptr [[__VLA_EXPR1]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef 
[[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i16 [[TMP9]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP13]], ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP10]], ptr [[TMP15]], align 8 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -396,18 +396,18 @@ // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP27:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP27:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i16 [[TMP27]], ptr [[TMP26]], align 4 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP29]], ptr [[TMP28]], align 4 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP31]], ptr [[TMP30]], align 4 // CHECK1-NEXT: [[TMP32:%.*]] = call ptr @__kmpc_omp_target_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 120, i64 12, ptr @.omp_task_entry., i64 -1) // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP32]], i32 0, i32 0 // CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP33]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP34]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP35]], ptr align 4 [[AGG_CAPTURED]], i64 12, i1 false) // 
CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP32]], i32 0, i32 1 // CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP36]], i32 0, i32 0 @@ -417,16 +417,16 @@ // CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP36]], i32 0, i32 2 // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP39]], ptr align 8 @.offload_sizes, i64 24, i1 false) // CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP36]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i16 [[TMP41]], ptr [[TMP40]], align 8 // CHECK1-NEXT: [[TMP42:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP32]]) -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP43]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP44:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l105(i64 [[TMP44]]) #[[ATTR3:[0-9]+]] -// CHECK1-NEXT: [[TMP45:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP45:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i16 [[TMP45]], ptr [[AA_CASTED4]], align 2 -// CHECK1-NEXT: [[TMP46:%.*]] = load i64, ptr [[AA_CASTED4]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = load i64, ptr [[AA_CASTED4]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP46]], ptr [[TMP47]], align 8 // CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 @@ -461,13 +461,13 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111(i64 [[TMP46]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP63:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP63:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP63]], ptr [[A_CASTED8]], align 4 -// CHECK1-NEXT: [[TMP64:%.*]] = load i64, ptr [[A_CASTED8]], align 8 -// CHECK1-NEXT: [[TMP65:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP64:%.*]] = load i64, ptr [[A_CASTED8]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP65:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i16 [[TMP65]], ptr [[AA_CASTED9]], align 2 -// CHECK1-NEXT: [[TMP66:%.*]] = load i64, ptr [[AA_CASTED9]], align 8 -// CHECK1-NEXT: [[TMP67:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP66:%.*]] = load i64, ptr [[AA_CASTED9]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP67:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP67]], 10 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -516,10 +516,10 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i64 [[TMP64]], i64 [[TMP66]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// 
CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP87]], ptr [[A_CASTED16]], align 4 -// CHECK1-NEXT: [[TMP88:%.*]] = load i64, ptr [[A_CASTED16]], align 8 -// CHECK1-NEXT: [[TMP89:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP88:%.*]] = load i64, ptr [[A_CASTED16]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP89:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CMP17:%.*]] = icmp sgt i32 [[TMP89]], 20 // CHECK1-NEXT: br i1 [[CMP17]], label [[OMP_IF_THEN18:%.*]], label [[OMP_IF_ELSE25:%.*]] // CHECK1: omp_if.then18: @@ -620,9 +620,9 @@ // CHECK1-NEXT: br label [[OMP_IF_END26]] // CHECK1: omp_if.end26: // CHECK1-NEXT: store i32 0, ptr [[NN]], align 4 -// CHECK1-NEXT: [[TMP136:%.*]] = load i32, ptr [[NN]], align 4 +// CHECK1-NEXT: [[TMP136:%.*]] = load i32, ptr [[NN]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP136]], ptr [[NN_CASTED]], align 4 -// CHECK1-NEXT: [[TMP137:%.*]] = load i64, ptr [[NN_CASTED]], align 8 +// CHECK1-NEXT: [[TMP137:%.*]] = load i64, ptr [[NN_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP138:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP137]], ptr [[TMP138]], align 8 // CHECK1-NEXT: [[TMP139:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 @@ -657,9 +657,9 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i64 [[TMP137]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT32]] // CHECK1: omp_offload.cont32: -// CHECK1-NEXT: [[TMP154:%.*]] = load i32, ptr [[NN]], align 4 +// CHECK1-NEXT: [[TMP154:%.*]] = load i32, ptr [[NN]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP154]], ptr [[NN_CASTED33]], align 4 -// CHECK1-NEXT: [[TMP155:%.*]] = load i64, ptr [[NN_CASTED33]], align 8 +// CHECK1-NEXT: [[TMP155:%.*]] = load i64, ptr [[NN_CASTED33]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP156:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP155]], ptr [[TMP156]], align 8 // CHECK1-NEXT: [[TMP157:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 @@ -694,7 +694,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i64 [[TMP155]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT39]] // CHECK1: omp_offload.cont39: -// CHECK1-NEXT: [[TMP172:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP172:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP173:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP173]]) // CHECK1-NEXT: ret i32 [[TMP172]] @@ -711,12 +711,12 @@ // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: 
call void @__kmpc_push_num_teams(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined., i64 [[TMP4]]) // CHECK1-NEXT: ret void // @@ -783,65 +783,65 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr 
[[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25 // CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR3]] -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !24 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !24 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !25 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4, !noalias !24 +// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4, !noalias !25 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP13]], ptr [[TMP21]], align 8, !noalias !24 +// CHECK1-NEXT: store ptr [[TMP13]], ptr [[TMP21]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP14]], ptr [[TMP22]], align 8, !noalias !24 +// CHECK1-NEXT: store ptr [[TMP14]], ptr [[TMP22]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK1-NEXT: store ptr [[TMP15]], ptr [[TMP23]], align 8, !noalias !24 +// CHECK1-NEXT: store ptr [[TMP15]], ptr [[TMP23]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP24]], align 8, !noalias !24 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP24]], align 8, !noalias !25 // CHECK1-NEXT: 
[[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8, !noalias !24 +// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8, !noalias !24 +// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK1-NEXT: store i64 0, ptr [[TMP27]], align 8, !noalias !24 +// CHECK1-NEXT: store i64 0, ptr [[TMP27]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB1]], i64 -1, i32 [[TMP18]], i32 [[TMP19]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 // CHECK1-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK1: omp_offload.failed.i: -// CHECK1-NEXT: [[TMP30:%.*]] = load i16, ptr [[TMP12]], align 2 -// CHECK1-NEXT: store i16 [[TMP30]], ptr [[AA_CASTED_I]], align 2, !noalias !24 -// CHECK1-NEXT: [[TMP31:%.*]] = load i64, ptr [[AA_CASTED_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK1-NEXT: store i32 [[TMP32]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !24 -// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK1-NEXT: store i32 [[TMP34]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !24 -// CHECK1-NEXT: [[TMP35:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 8, !noalias !24 +// CHECK1-NEXT: [[TMP30:%.*]] = load i16, ptr [[TMP12]], align 2, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: store i16 [[TMP30]], ptr [[AA_CASTED_I]], align 2, !noalias !25 +// CHECK1-NEXT: [[TMP31:%.*]] = load i64, ptr [[AA_CASTED_I]], align 8, !noalias !25, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: store i32 [[TMP32]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !25 +// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 8, !noalias !25, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: store i32 [[TMP34]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !25 +// CHECK1-NEXT: [[TMP35:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 8, !noalias !25, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i64 [[TMP31]], i64 [[TMP33]], i64 [[TMP35]]) #[[ATTR3]] // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK1: .omp_outlined..1.exit: @@ -854,9 +854,9 @@ // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr 
[[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..2, i64 [[TMP1]]) // CHECK1-NEXT: ret void // @@ -870,7 +870,7 @@ // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK1-NEXT: ret void @@ -882,9 +882,9 @@ // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..3, i64 [[TMP1]]) // CHECK1-NEXT: ret void // @@ -898,7 +898,7 @@ // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16 @@ -915,12 +915,12 @@ // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 2, ptr @.omp_outlined..6, i64 [[TMP1]], i64 [[TMP3]]) // CHECK1-NEXT: ret void // @@ -936,10 +936,10 @@ // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 @@ -970,16 +970,16 @@ // CHECK1-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 9, ptr @.omp_outlined..9, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK1-NEXT: ret void // @@ -1010,45 +1010,45 @@ // CHECK1-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double // CHECK1-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK1-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float // CHECK1-NEXT: store float [[CONV6]], ptr [[ARRAYIDX]], align 4 // CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK1-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double // CHECK1-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00 // CHECK1-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float // CHECK1-NEXT: store float [[CONV10]], ptr [[ARRAYIDX7]], align 4 // CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX11]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00 // CHECK1-NEXT: store double [[ADD13]], ptr [[ARRAYIDX12]], align 8 // CHECK1-NEXT: [[TMP12:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK1-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP12]] // CHECK1-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX14]], i64 3 -// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK1-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 
8 // CHECK1-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1 // CHECK1-NEXT: store i64 [[ADD17]], ptr [[X]], align 8 // CHECK1-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32 // CHECK1-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1 // CHECK1-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8 @@ -1062,9 +1062,9 @@ // CHECK1-NEXT: [[NN_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[NN_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[NN]], ptr [[NN_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[NN_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[NN_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[NN_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..12, i64 [[TMP1]]) // CHECK1-NEXT: ret void // @@ -1079,9 +1079,9 @@ // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[NN]], ptr [[NN_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[NN_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[NN_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[NN_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..13, i64 [[TMP1]]) // CHECK1-NEXT: ret void // @@ -1104,9 +1104,9 @@ // CHECK1-NEXT: [[NN_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[NN_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[NN]], ptr [[NN_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[NN_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[NN_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[NN_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..16, i64 [[TMP1]]) // CHECK1-NEXT: ret void // @@ -1147,7 +1147,7 @@ // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store ptr [[F]], ptr [[F_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 @@ -1191,7 +1191,7 @@ // CHECK1-NEXT: entry: // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..20, i64 [[TMP0]]) // CHECK1-NEXT: ret void // @@ -1206,7 +1206,7 @@ // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: ret void // // @@ -1218,27 +1218,27 @@ // CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK1-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK1-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef 
[[NOUNDEF13]] // CHECK1-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK1-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: ret i32 [[TMP8]] // // @@ -1258,20 +1258,20 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1348,9 +1348,9 @@ // CHECK1-NEXT: [[TMP40:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP40]] // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2 +// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP41]] to i32 -// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP42]] // CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) @@ -1375,16 +1375,16 @@ // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK1-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = 
load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i8 [[TMP4]], ptr [[AAA_CASTED]], align 1 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1445,7 +1445,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], ptr [[B]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: ret i32 [[TMP32]] // // @@ -1464,13 +1464,13 @@ // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1525,7 +1525,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192(i64 [[TMP1]], i64 [[TMP3]], ptr [[B]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: ret i32 [[TMP27]] // // @@ -1543,13 +1543,13 @@ // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// 
CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 5, ptr @.omp_outlined..23, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]]) // CHECK1-NEXT: ret void // @@ -1571,17 +1571,17 @@ // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double // CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK1-NEXT: store double [[ADD]], ptr [[A]], align 8 // CHECK1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00 // CHECK1-NEXT: store double [[INC]], ptr [[A3]], align 8 // CHECK1-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 @@ -1607,15 +1607,15 @@ // CHECK1-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], 
align 1 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 4, ptr @.omp_outlined..26, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -1636,21 +1636,21 @@ // CHECK1-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK1-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK1-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CONV3:%.*]] = sext i8 [[TMP3]] to i32 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK1-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8 // CHECK1-NEXT: store i8 [[CONV5]], ptr [[AAA_ADDR]], align 1 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP4]], 1 // CHECK1-NEXT: store i32 [[ADD6]], ptr [[ARRAYIDX]], align 4 // CHECK1-NEXT: ret void @@ -1668,12 +1668,12 @@ // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF13]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF13]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 3, ptr @.omp_outlined..29, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -1692,16 +1692,16 @@ // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK1-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK1-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF13]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 // CHECK1-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4 // CHECK1-NEXT: ret void @@ -1763,28 +1763,28 @@ // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF14:![0-9]+]] // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]] // CHECK3-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8 // CHECK3-NEXT: store i32 [[TMP3]], ptr [[__VLA_EXPR1]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP7:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i16 [[TMP7]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP9]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], 
align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP8]], ptr [[TMP13]], align 4 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -1806,18 +1806,18 @@ // CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP25:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP25:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i16 [[TMP25]], ptr [[TMP24]], align 4 // CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP27]], ptr [[TMP26]], align 4 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP29]], ptr [[TMP28]], align 4 // CHECK3-NEXT: [[TMP30:%.*]] = call ptr @__kmpc_omp_target_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 72, i32 12, ptr @.omp_task_entry., i64 -1) // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP30]], i32 0, i32 0 // CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP31]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP33:%.*]] = load ptr, ptr [[TMP32]], align 4 +// CHECK3-NEXT: [[TMP33:%.*]] = load ptr, ptr [[TMP32]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP33]], ptr align 4 [[AGG_CAPTURED]], i32 12, i1 false) // CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP30]], i32 0, i32 1 // CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP34]], i32 0, i32 0 @@ -1827,16 +1827,16 @@ // CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP34]], i32 0, i32 2 // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP37]], ptr align 4 [[TMP23]], i32 12, i1 false) // CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP34]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP39:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP39:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i16 [[TMP39]], ptr [[TMP38]], align 4 // CHECK3-NEXT: [[TMP40:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 
[[TMP0]], ptr [[TMP30]]) -// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP41]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l105(i32 [[TMP42]]) #[[ATTR3:[0-9]+]] -// CHECK3-NEXT: [[TMP43:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP43:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i16 [[TMP43]], ptr [[AA_CASTED4]], align 2 -// CHECK3-NEXT: [[TMP44:%.*]] = load i32, ptr [[AA_CASTED4]], align 4 +// CHECK3-NEXT: [[TMP44:%.*]] = load i32, ptr [[AA_CASTED4]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP44]], ptr [[TMP45]], align 4 // CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 @@ -1871,13 +1871,13 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111(i32 [[TMP44]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK3: omp_offload.cont: -// CHECK3-NEXT: [[TMP61:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP61:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP61]], ptr [[A_CASTED8]], align 4 -// CHECK3-NEXT: [[TMP62:%.*]] = load i32, ptr [[A_CASTED8]], align 4 -// CHECK3-NEXT: [[TMP63:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP62:%.*]] = load i32, ptr [[A_CASTED8]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP63:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i16 [[TMP63]], ptr [[AA_CASTED9]], align 2 -// CHECK3-NEXT: [[TMP64:%.*]] = load i32, ptr [[AA_CASTED9]], align 4 -// CHECK3-NEXT: [[TMP65:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP64:%.*]] = load i32, ptr [[AA_CASTED9]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP65:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP65]], 10 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -1926,10 +1926,10 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i32 [[TMP62]], i32 [[TMP64]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP85:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP85:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP85]], ptr [[A_CASTED16]], align 4 -// CHECK3-NEXT: [[TMP86:%.*]] = load i32, ptr [[A_CASTED16]], align 4 -// CHECK3-NEXT: [[TMP87:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP86:%.*]] = load i32, ptr [[A_CASTED16]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP87:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CMP17:%.*]] = icmp sgt i32 [[TMP87]], 20 // CHECK3-NEXT: br i1 [[CMP17]], label [[OMP_IF_THEN18:%.*]], label [[OMP_IF_ELSE25:%.*]] // CHECK3: omp_if.then18: @@ -2032,9 +2032,9 @@ // CHECK3-NEXT: br label [[OMP_IF_END26]] // CHECK3: omp_if.end26: // CHECK3-NEXT: store i32 0, ptr [[NN]], align 4 -// CHECK3-NEXT: [[TMP136:%.*]] = load 
i32, ptr [[NN]], align 4 +// CHECK3-NEXT: [[TMP136:%.*]] = load i32, ptr [[NN]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP136]], ptr [[NN_CASTED]], align 4 -// CHECK3-NEXT: [[TMP137:%.*]] = load i32, ptr [[NN_CASTED]], align 4 +// CHECK3-NEXT: [[TMP137:%.*]] = load i32, ptr [[NN_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP138:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP137]], ptr [[TMP138]], align 4 // CHECK3-NEXT: [[TMP139:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 @@ -2069,9 +2069,9 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i32 [[TMP137]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT32]] // CHECK3: omp_offload.cont32: -// CHECK3-NEXT: [[TMP154:%.*]] = load i32, ptr [[NN]], align 4 +// CHECK3-NEXT: [[TMP154:%.*]] = load i32, ptr [[NN]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP154]], ptr [[NN_CASTED33]], align 4 -// CHECK3-NEXT: [[TMP155:%.*]] = load i32, ptr [[NN_CASTED33]], align 4 +// CHECK3-NEXT: [[TMP155:%.*]] = load i32, ptr [[NN_CASTED33]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP156:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP155]], ptr [[TMP156]], align 4 // CHECK3-NEXT: [[TMP157:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 @@ -2106,7 +2106,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i32 [[TMP155]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT39]] // CHECK3: omp_offload.cont39: -// CHECK3-NEXT: [[TMP172:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP172:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP173:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP173]]) // CHECK3-NEXT: ret i32 [[TMP172]] @@ -2123,12 +2123,12 @@ // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined., i32 [[TMP4]]) // CHECK3-NEXT: ret void // @@ -2195,65 +2195,65 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !25 +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26 // CHECK3-NEXT: call void 
[[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR3]] -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !25 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !25 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK3-NEXT: store i32 3, ptr [[TMP20]], align 4, !noalias !25 +// CHECK3-NEXT: store i32 3, ptr [[TMP20]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP13]], ptr [[TMP21]], align 4, !noalias !25 +// CHECK3-NEXT: store ptr [[TMP13]], ptr [[TMP21]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP14]], ptr [[TMP22]], align 4, !noalias !25 +// CHECK3-NEXT: store ptr [[TMP14]], ptr [[TMP22]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK3-NEXT: store ptr [[TMP15]], ptr [[TMP23]], align 4, !noalias !25 +// CHECK3-NEXT: store ptr [[TMP15]], ptr [[TMP23]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP24]], align 4, !noalias !25 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP24]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4, !noalias !25 +// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP26]], align 4, !noalias !25 +// CHECK3-NEXT: store ptr null, ptr [[TMP26]], align 4, !noalias !26 // CHECK3-NEXT: 
[[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK3-NEXT: store i64 0, ptr [[TMP27]], align 8, !noalias !25 +// CHECK3-NEXT: store i64 0, ptr [[TMP27]], align 8, !noalias !26 // CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB1]], i64 -1, i32 [[TMP18]], i32 [[TMP19]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 // CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK3: omp_offload.failed.i: -// CHECK3-NEXT: [[TMP30:%.*]] = load i16, ptr [[TMP12]], align 2 -// CHECK3-NEXT: store i16 [[TMP30]], ptr [[AA_CASTED_I]], align 2, !noalias !25 -// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[AA_CASTED_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK3-NEXT: store i32 [[TMP32]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK3-NEXT: store i32 [[TMP34]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !25 -// CHECK3-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !25 +// CHECK3-NEXT: [[TMP30:%.*]] = load i16, ptr [[TMP12]], align 2, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: store i16 [[TMP30]], ptr [[AA_CASTED_I]], align 2, !noalias !26 +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[AA_CASTED_I]], align 4, !noalias !26, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: store i32 [[TMP32]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !26, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: store i32 [[TMP34]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !26 +// CHECK3-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !26, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i32 [[TMP31]], i32 [[TMP33]], i32 [[TMP35]]) #[[ATTR3]] // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK3: .omp_outlined..1.exit: @@ -2266,9 +2266,9 @@ // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..2, i32 [[TMP1]]) // CHECK3-NEXT: ret void // @@ -2282,7 +2282,7 @@ // CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: ret void @@ -2294,9 +2294,9 @@ // CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..3, i32 [[TMP1]]) // CHECK3-NEXT: ret void // @@ -2310,7 +2310,7 @@ // CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16 @@ -2327,12 +2327,12 @@ // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 2, ptr @.omp_outlined..6, i32 [[TMP1]], i32 [[TMP3]]) // CHECK3-NEXT: ret void // @@ -2348,10 +2348,10 @@ // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 @@ -2382,16 +2382,16 @@ // CHECK3-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 9, ptr @.omp_outlined..9, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK3-NEXT: ret void // @@ -2422,45 +2422,45 @@ // CHECK3-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double // CHECK3-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK3-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float // CHECK3-NEXT: store float [[CONV6]], ptr [[ARRAYIDX]], align 4 // CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK3-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double // CHECK3-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00 // CHECK3-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float // CHECK3-NEXT: store float [[CONV10]], ptr [[ARRAYIDX7]], align 4 // CHECK3-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK3-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX11]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8 +// CHECK3-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00 // CHECK3-NEXT: store double [[ADD13]], ptr [[ARRAYIDX12]], align 8 // CHECK3-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK3-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP12]] // CHECK3-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX14]], i32 3 -// CHECK3-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8 +// CHECK3-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK3-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 
8 // CHECK3-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1 // CHECK3-NEXT: store i64 [[ADD17]], ptr [[X]], align 4 // CHECK3-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32 // CHECK3-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1 // CHECK3-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8 @@ -2474,9 +2474,9 @@ // CHECK3-NEXT: [[NN_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[NN_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[NN]], ptr [[NN_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[NN_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[NN_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[NN_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..12, i32 [[TMP1]]) // CHECK3-NEXT: ret void // @@ -2491,9 +2491,9 @@ // CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store i32 [[NN]], ptr [[NN_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[NN_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[NN_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[NN_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..13, i32 [[TMP1]]) // CHECK3-NEXT: ret void // @@ -2516,9 +2516,9 @@ // CHECK3-NEXT: [[NN_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[NN_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[NN]], ptr [[NN_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[NN_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[NN_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[NN_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..16, i32 [[TMP1]]) // CHECK3-NEXT: ret void // @@ -2559,7 +2559,7 @@ // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store ptr [[F]], ptr [[F_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[TMP1]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -2602,7 +2602,7 @@ // CHECK3-NEXT: entry: // CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..20, i32 [[TMP0]]) // CHECK3-NEXT: ret void // @@ -2617,7 +2617,7 @@ // CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: ret void // // @@ -2629,27 +2629,27 @@ // CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK3-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK3-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef 
[[NOUNDEF14]] // CHECK3-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK3-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: ret i32 [[TMP8]] // // @@ -2669,19 +2669,19 @@ // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -2759,9 +2759,9 @@ // CHECK3-NEXT: [[TMP40:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP40]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK3-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2 +// CHECK3-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP41]] to i32 -// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4 +// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP42]] // CHECK3-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) @@ -2786,16 +2786,16 @@ // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK3-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr 
[[A_CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i8 [[TMP4]], ptr [[AAA_CASTED]], align 1 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -2856,7 +2856,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], ptr [[B]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: ret i32 [[TMP32]] // // @@ -2875,13 +2875,13 @@ // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -2936,7 +2936,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192(i32 [[TMP1]], i32 [[TMP3]], ptr [[B]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: ret i32 [[TMP27]] // // @@ -2954,13 +2954,13 @@ // CHECK3-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// 
CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 5, ptr @.omp_outlined..23, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]]) // CHECK3-NEXT: ret void // @@ -2982,17 +2982,17 @@ // CHECK3-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double // CHECK3-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK3-NEXT: store double [[ADD]], ptr [[A]], align 4 // CHECK3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00 // CHECK3-NEXT: store double [[INC]], ptr [[A3]], align 4 // CHECK3-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 @@ -3018,15 +3018,15 @@ // CHECK3-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], 
align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 4, ptr @.omp_outlined..26, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -3047,21 +3047,21 @@ // CHECK3-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK3-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK3-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CONV3:%.*]] = sext i8 [[TMP3]] to i32 // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK3-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8 // CHECK3-NEXT: store i8 [[CONV5]], ptr [[AAA_ADDR]], align 1 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP4]], 1 // CHECK3-NEXT: store i32 [[ADD6]], ptr [[ARRAYIDX]], align 4 // CHECK3-NEXT: ret void @@ -3079,12 +3079,12 @@ // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF14]] +// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 3, ptr @.omp_outlined..29, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -3103,16 +3103,16 @@ // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK3-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF14]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 // CHECK3-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4 // CHECK3-NEXT: ret void @@ -3136,12 +3136,12 @@ // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF14:![0-9]+]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK9-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined., i64 [[TMP4]]) // CHECK9-NEXT: ret void // @@ -3164,9 +3164,9 @@ // CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK9-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..1, i64 [[TMP1]]) // CHECK9-NEXT: ret void // @@ -3180,7 +3180,7 @@ // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16 @@ -3197,12 +3197,12 @@ // CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF14]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK9-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 2, ptr @.omp_outlined..2, i64 [[TMP1]], i64 [[TMP3]]) // CHECK9-NEXT: ret void // @@ -3218,10 +3218,10 @@ // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK9-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK9-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 @@ -3252,16 +3252,16 @@ // CHECK9-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK9-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF14]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr 
[[D_ADDR]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 9, ptr @.omp_outlined..3, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK9-NEXT: ret void // @@ -3292,45 +3292,45 @@ // CHECK9-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK9-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF14]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK9-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double // CHECK9-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK9-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float // CHECK9-NEXT: store float [[CONV6]], ptr [[ARRAYIDX]], align 4 // CHECK9-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK9-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double // CHECK9-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00 // CHECK9-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float // CHECK9-NEXT: store float [[CONV10]], ptr [[ARRAYIDX7]], align 4 // CHECK9-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK9-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX11]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00 // CHECK9-NEXT: store double [[ADD13]], ptr [[ARRAYIDX12]], align 8 // CHECK9-NEXT: [[TMP12:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK9-NEXT: [[ARRAYIDX14:%.*]] = getelementptr 
inbounds double, ptr [[TMP6]], i64 [[TMP12]] // CHECK9-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX14]], i64 3 -// CHECK9-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK9-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8 // CHECK9-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 8 +// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1 // CHECK9-NEXT: store i64 [[ADD17]], ptr [[X]], align 8 // CHECK9-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32 // CHECK9-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1 // CHECK9-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8 @@ -3344,9 +3344,9 @@ // CHECK9-NEXT: [[NN_ADDR:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[NN_CASTED:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i64 [[NN]], ptr [[NN_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[NN_CASTED]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[NN_CASTED]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[NN_CASTED]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..4, i64 [[TMP1]]) // CHECK9-NEXT: ret void // @@ -3361,9 +3361,9 @@ // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK9-NEXT: store i64 [[NN]], ptr [[NN_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[NN_CASTED]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[NN_CASTED]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[NN_CASTED]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..5, i64 [[TMP1]]) // CHECK9-NEXT: ret void // @@ -3386,9 +3386,9 @@ // CHECK9-NEXT: [[NN_ADDR:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[NN_CASTED:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i64 [[NN]], ptr [[NN_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[NN_CASTED]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[NN_CASTED]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[NN_CASTED]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..6, i64 [[TMP1]]) // CHECK9-NEXT: ret void // @@ -3424,7 +3424,7 @@ // CHECK9-NEXT: entry: // CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..8, i64 [[TMP0]]) // CHECK9-NEXT: ret void // @@ -3439,7 +3439,7 @@ // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: ret void // // @@ -3458,15 +3458,15 @@ // CHECK9-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF14]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK9-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF14]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF14]] // CHECK9-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 4, ptr @.omp_outlined..9, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], ptr [[TMP0]]) // CHECK9-NEXT: ret void // @@ -3487,21 +3487,21 @@ // CHECK9-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK9-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK9-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK9-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 -// CHECK9-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK9-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[CONV3:%.*]] = sext i8 [[TMP3]] to i32 // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK9-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8 // CHECK9-NEXT: store i8 [[CONV5]], ptr [[AAA_ADDR]], align 1 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP4]], 1 // CHECK9-NEXT: store i32 [[ADD6]], ptr [[ARRAYIDX]], align 4 // CHECK9-NEXT: ret void @@ -3521,13 +3521,13 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF14]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF14]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 5, ptr @.omp_outlined..10, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]]) // CHECK9-NEXT: ret void // @@ -3549,17 +3549,17 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF14]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF14]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double // CHECK9-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK9-NEXT: store double [[ADD]], ptr [[A]], align 8 // CHECK9-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00 // CHECK9-NEXT: store double [[INC]], ptr [[A3]], align 8 // CHECK9-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 @@ -3582,12 +3582,12 @@ // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF14]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK9-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF14]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 3, ptr @.omp_outlined..11, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK9-NEXT: ret void // @@ -3606,16 +3606,16 @@ // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK9-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK9-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK9-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF14]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 // CHECK9-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4 // CHECK9-NEXT: ret void @@ -3632,12 +3632,12 @@ // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF15:![0-9]+]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF15]] // CHECK11-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined., i32 [[TMP4]]) // CHECK11-NEXT: ret void // @@ -3660,9 +3660,9 @@ // CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF15]] // CHECK11-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..1, i32 [[TMP1]]) // CHECK11-NEXT: ret void // @@ -3676,7 +3676,7 @@ // CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP0]] to i32 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV1:%.*]] = trunc i32 [[ADD]] to i16 @@ -3693,12 +3693,12 @@ // CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF15]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF15]] // CHECK11-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 2, ptr @.omp_outlined..2, i32 [[TMP1]], i32 [[TMP3]]) // CHECK11-NEXT: ret void // @@ -3714,10 +3714,10 @@ // CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK11-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP1:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP1]] to i32 // CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 @@ -3748,16 +3748,16 @@ // CHECK11-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK11-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF15]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 
// CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 9, ptr @.omp_outlined..3, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK11-NEXT: ret void // @@ -3788,45 +3788,45 @@ // CHECK11-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK11-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF15]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK11-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[CONV:%.*]] = fpext float [[TMP9]] to double // CHECK11-NEXT: [[ADD5:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK11-NEXT: [[CONV6:%.*]] = fptrunc double [[ADD5]] to float // CHECK11-NEXT: store float [[CONV6]], ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK11-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX7]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[CONV8:%.*]] = fpext float [[TMP10]] to double // CHECK11-NEXT: [[ADD9:%.*]] = fadd double [[CONV8]], 1.000000e+00 // CHECK11-NEXT: [[CONV10:%.*]] = fptrunc double [[ADD9]] to float // CHECK11-NEXT: store float [[CONV10]], ptr [[ARRAYIDX7]], align 4 // CHECK11-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK11-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX11]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8 +// CHECK11-NEXT: [[TMP11:%.*]] = load double, ptr [[ARRAYIDX12]], align 8, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[ADD13:%.*]] = fadd double [[TMP11]], 1.000000e+00 // CHECK11-NEXT: store double [[ADD13]], ptr [[ARRAYIDX12]], align 8 // CHECK11-NEXT: 
[[TMP12:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK11-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP12]] // CHECK11-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX14]], i32 3 -// CHECK11-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8 +// CHECK11-NEXT: [[TMP13:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[ADD16:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK11-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8 // CHECK11-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[X]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[ADD17:%.*]] = add nsw i64 [[TMP14]], 1 // CHECK11-NEXT: store i64 [[ADD17]], ptr [[X]], align 4 // CHECK11-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = load i8, ptr [[Y]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[CONV18:%.*]] = sext i8 [[TMP15]] to i32 // CHECK11-NEXT: [[ADD19:%.*]] = add nsw i32 [[CONV18]], 1 // CHECK11-NEXT: [[CONV20:%.*]] = trunc i32 [[ADD19]] to i8 @@ -3840,9 +3840,9 @@ // CHECK11-NEXT: [[NN_ADDR:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[NN_CASTED:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 [[NN]], ptr [[NN_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[NN_CASTED]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[NN_CASTED]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[NN_CASTED]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..4, i32 [[TMP1]]) // CHECK11-NEXT: ret void // @@ -3857,9 +3857,9 @@ // CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK11-NEXT: store i32 [[NN]], ptr [[NN_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[NN_CASTED]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[NN_CASTED]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[NN_CASTED]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..5, i32 [[TMP1]]) // CHECK11-NEXT: ret void // @@ -3882,9 +3882,9 @@ // CHECK11-NEXT: [[NN_ADDR:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[NN_CASTED:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 [[NN]], ptr [[NN_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[NN_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[NN_CASTED]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[NN_CASTED]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[NN_CASTED]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..6, i32 [[TMP1]]) // CHECK11-NEXT: ret void // @@ -3920,7 +3920,7 @@ // CHECK11-NEXT: entry: // CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB1]], i32 1, ptr @.omp_outlined..8, i32 [[TMP0]]) // CHECK11-NEXT: ret void // @@ -3935,7 +3935,7 @@ // CHECK11-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK11-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: ret void // // @@ -3954,15 +3954,15 @@ // CHECK11-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF15]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF15]] // CHECK11-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF15]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF15]] // CHECK11-NEXT: store i8 [[TMP5]], ptr [[AAA_CASTED]], align 1 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 4, ptr @.omp_outlined..9, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], ptr [[TMP0]]) // CHECK11-NEXT: ret void // @@ -3983,21 +3983,21 @@ // CHECK11-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK11-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK11-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 -// CHECK11-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK11-NEXT: [[TMP3:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[CONV3:%.*]] = sext i8 [[TMP3]] to i32 // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK11-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i8 // CHECK11-NEXT: store i8 [[CONV5]], ptr [[AAA_ADDR]], align 1 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP4]], 1 // CHECK11-NEXT: store i32 [[ADD6]], ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: ret void @@ -4017,13 +4017,13 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF15]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF15]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 5, ptr @.omp_outlined..10, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]]) // CHECK11-NEXT: ret void // @@ -4045,17 +4045,17 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF15]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF15]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP4]] to double // CHECK11-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK11-NEXT: store double [[ADD]], ptr [[A]], align 4 // CHECK11-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load double, ptr [[A3]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[INC:%.*]] = fadd double [[TMP5]], 1.000000e+00 // CHECK11-NEXT: store double [[INC]], ptr [[A3]], align 4 // CHECK11-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16 @@ -4078,12 +4078,12 @@ // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF15]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF15]] // CHECK11-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1]], i32 3, ptr @.omp_outlined..11, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK11-NEXT: ret void // @@ -4102,16 +4102,16 @@ // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], 1 // CHECK11-NEXT: store i32 [[ADD]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP2]] to i32 // CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD1]] to i16 // CHECK11-NEXT: store i16 [[CONV2]], ptr [[AA_ADDR]], align 2 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF15]] // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP3]], 1 // CHECK11-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: ret void diff --git a/clang/test/OpenMP/target_teams_distribute_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_codegen.cpp @@ -339,30 +339,30 @@ // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 // CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]] // CHECK1-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8 // CHECK1-NEXT: store i64 [[TMP5]], ptr [[__VLA_EXPR1]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP9]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr 
[[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP13]], ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP10]], ptr [[TMP15]], align 8 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -384,18 +384,18 @@ // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP27:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP27:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP27]], ptr [[TMP26]], align 4 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP29]], ptr [[TMP28]], align 4 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP31]], ptr [[TMP30]], align 4 // CHECK1-NEXT: [[TMP32:%.*]] = call ptr @__kmpc_omp_target_task_alloc(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i64 120, i64 12, ptr @.omp_task_entry., i64 -1) // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP32]], i32 0, i32 0 // CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP33]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP34]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP35]], ptr align 4 [[AGG_CAPTURED]], i64 12, i1 false) // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP32]], i32 0, i32 1 // CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP36]], i32 0, i32 0 @@ -405,16 +405,16 @@ // CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP36]], i32 0, i32 2 // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP39]], ptr align 8 @.offload_sizes, i64 24, i1 false) // CHECK1-NEXT: 
[[TMP40:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP36]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP41]], ptr [[TMP40]], align 8 // CHECK1-NEXT: [[TMP42:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP0]], ptr [[TMP32]]) -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP43]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP44:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l107(i64 [[TMP44]]) #[[ATTR3:[0-9]+]] -// CHECK1-NEXT: [[TMP45:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP45:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP45]], ptr [[AA_CASTED4]], align 2 -// CHECK1-NEXT: [[TMP46:%.*]] = load i64, ptr [[AA_CASTED4]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = load i64, ptr [[AA_CASTED4]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP46]], ptr [[TMP47]], align 8 // CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 @@ -449,13 +449,13 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l113(i64 [[TMP46]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP63:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP63:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP63]], ptr [[A_CASTED8]], align 4 -// CHECK1-NEXT: [[TMP64:%.*]] = load i64, ptr [[A_CASTED8]], align 8 -// CHECK1-NEXT: [[TMP65:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP64:%.*]] = load i64, ptr [[A_CASTED8]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP65:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP65]], ptr [[AA_CASTED9]], align 2 -// CHECK1-NEXT: [[TMP66:%.*]] = load i64, ptr [[AA_CASTED9]], align 8 -// CHECK1-NEXT: [[TMP67:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP66:%.*]] = load i64, ptr [[AA_CASTED9]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP67:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP67]], 10 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -504,15 +504,15 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l120(i64 [[TMP64]], i64 [[TMP66]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP87]], ptr [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK1-NEXT: [[TMP88:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP88:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP88]], ptr [[A_CASTED18]], align 4 -// CHECK1-NEXT: [[TMP89:%.*]] = load i64, ptr [[A_CASTED18]], align 
8 -// CHECK1-NEXT: [[TMP90:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK1-NEXT: [[TMP89:%.*]] = load i64, ptr [[A_CASTED18]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP90:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_17]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP90]], ptr [[DOTCAPTURE_EXPR__CASTED19]], align 4 -// CHECK1-NEXT: [[TMP91:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED19]], align 8 -// CHECK1-NEXT: [[TMP92:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP91:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED19]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP92:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP20:%.*]] = icmp sgt i32 [[TMP92]], 20 // CHECK1-NEXT: br i1 [[CMP20]], label [[OMP_IF_THEN21:%.*]], label [[OMP_IF_ELSE29:%.*]] // CHECK1: omp_if.then21: @@ -618,7 +618,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i64 [[TMP89]], ptr [[B]], i64 [[TMP2]], ptr [[VLA]], ptr [[C]], i64 5, i64 [[TMP5]], ptr [[VLA1]], ptr [[D]], i64 [[TMP91]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_IF_END30]] // CHECK1: omp_if.end30: -// CHECK1-NEXT: [[TMP142:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP142:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP143:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP143]]) // CHECK1-NEXT: ret i32 [[TMP142]] @@ -635,12 +635,12 @@ // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., i64 [[TMP4]]) // CHECK1-NEXT: ret void // @@ -666,29 +666,29 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -696,7 +696,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -757,65 +757,65 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !21 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !21 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !21 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !21 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !21 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !21 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !21 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !21 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !21 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !22 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !22 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !22 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !22 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !22 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !22 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !22 // CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR3]] -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !21 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !21 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !21 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !21 +// CHECK1-NEXT: 
[[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !22 +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !22 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !22 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !21 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !22 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4, !noalias !21 +// CHECK1-NEXT: store i32 3, ptr [[TMP20]], align 4, !noalias !22 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP13]], ptr [[TMP21]], align 8, !noalias !21 +// CHECK1-NEXT: store ptr [[TMP13]], ptr [[TMP21]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP14]], ptr [[TMP22]], align 8, !noalias !21 +// CHECK1-NEXT: store ptr [[TMP14]], ptr [[TMP22]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK1-NEXT: store ptr [[TMP15]], ptr [[TMP23]], align 8, !noalias !21 +// CHECK1-NEXT: store ptr [[TMP15]], ptr [[TMP23]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP24]], align 8, !noalias !21 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP24]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8, !noalias !21 +// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8, !noalias !21 +// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK1-NEXT: store i64 10, ptr [[TMP27]], align 8, !noalias !21 +// CHECK1-NEXT: store i64 10, ptr [[TMP27]], align 8, !noalias !22 // CHECK1-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB2]], i64 -1, i32 [[TMP18]], i32 [[TMP19]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK1-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 // CHECK1-NEXT: br i1 
[[TMP29]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK1: omp_offload.failed.i: -// CHECK1-NEXT: [[TMP30:%.*]] = load i16, ptr [[TMP12]], align 2 -// CHECK1-NEXT: store i16 [[TMP30]], ptr [[AA_CASTED_I]], align 2, !noalias !21 -// CHECK1-NEXT: [[TMP31:%.*]] = load i64, ptr [[AA_CASTED_I]], align 8, !noalias !21 -// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK1-NEXT: store i32 [[TMP32]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !21 -// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 8, !noalias !21 -// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK1-NEXT: store i32 [[TMP34]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !21 -// CHECK1-NEXT: [[TMP35:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 8, !noalias !21 +// CHECK1-NEXT: [[TMP30:%.*]] = load i16, ptr [[TMP12]], align 2, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: store i16 [[TMP30]], ptr [[AA_CASTED_I]], align 2, !noalias !22 +// CHECK1-NEXT: [[TMP31:%.*]] = load i64, ptr [[AA_CASTED_I]], align 8, !noalias !22, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: store i32 [[TMP32]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !22 +// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 8, !noalias !22, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: store i32 [[TMP34]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !22 +// CHECK1-NEXT: [[TMP35:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 8, !noalias !22, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103(i64 [[TMP31]], i64 [[TMP33]], i64 [[TMP35]]) #[[ATTR3]] // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK1: .omp_outlined..1.exit: @@ -828,9 +828,9 @@ // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..2, i64 [[TMP1]]) // CHECK1-NEXT: ret void // @@ -856,40 +856,40 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -906,9 +906,9 @@ // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] 
= load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..3, i64 [[TMP1]]) // CHECK1-NEXT: ret void // @@ -934,33 +934,33 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 @@ -969,7 +969,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -989,12 +989,12 @@ // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..6, i64 [[TMP1]], i64 [[TMP3]]) // CHECK1-NEXT: ret void // @@ -1022,36 +1022,36 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 
+// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 @@ -1060,7 +1060,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -1097,19 +1097,19 @@ // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..9, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i64 [[TMP11]]) // CHECK1-NEXT: ret void // @@ -1149,105 +1149,105 @@ // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK1: omp.dispatch.cond: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK1-NEXT: br i1 
[[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]] -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK1-NEXT: store i32 [[ADD7]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: store i32 [[ADD7]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = fpext float [[TMP20]] to double // CHECK1-NEXT: [[ADD8:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK1-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float -// CHECK1-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK1-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK1-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK1-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float -// CHECK1-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK1-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK1-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i64 0, i64 2 -// CHECK1-NEXT: 
[[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 -// CHECK1-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK1-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK1-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP23]] // CHECK1-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i64 3 -// CHECK1-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 -// CHECK1-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK1-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK1-NEXT: store i64 [[ADD20]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: store i64 [[ADD20]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK1-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK1-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK1-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 -// CHECK1-NEXT: store i8 [[CONV23]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: store i8 [[CONV23]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 -// CHECK1-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK1: omp.dispatch.inc: -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: 
[[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK1-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK1-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] @@ -1264,27 +1264,27 @@ // CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK1-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK1-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK1-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP8]] // // @@ -1305,20 +1305,20 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// 
CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1395,9 +1395,9 @@ // CHECK1-NEXT: [[TMP40:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP40]] // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2 +// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP41]] to i32 -// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP42]] // CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) @@ -1427,19 +1427,19 @@ // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK1-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[N_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP4]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i8, ptr [[AAA]], align 1 +// 
CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i8 [[TMP6]], ptr [[AAA_CASTED]], align 1 -// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 50 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1475,19 +1475,19 @@ // CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP26]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP27]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP28]], [[TMP29]] // CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD5:%.*]] = add i32 [[TMP30]], 1 // CHECK1-NEXT: [[TMP31:%.*]] = zext i32 [[ADD5]] to i64 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -1521,7 +1521,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], i64 [[TMP7]], ptr [[B]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP43]] // // @@ -1541,13 +1541,13 @@ // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, 
!noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1602,7 +1602,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183(i64 [[TMP1]], i64 [[TMP3]], ptr [[B]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP27]] // // @@ -1620,13 +1620,13 @@ // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..12, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]]) // CHECK1-NEXT: ret void // @@ -1655,48 +1655,48 @@ // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// 
CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK1-NEXT: [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK1-NEXT: store double [[ADD4]], ptr [[A]], align 8 // CHECK1-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK1-NEXT: store double [[INC]], ptr [[A5]], align 8 // CHECK1-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 @@ -1708,7 +1708,7 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1 // CHECK1-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -1737,18 +1737,18 @@ // CHECK1-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[N_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP3]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP5]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i8 [[TMP7]], ptr [[AAA_CASTED]], align 1 -// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..15, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -1782,83 +1782,83 @@ // CHECK1-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]] // CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1: omp.precond.then: // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP10]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP6:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]] // CHECK1-NEXT: br i1 
[[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD7:%.*]] = add i32 [[TMP17]], 1 // CHECK1-NEXT: [[CMP8:%.*]] = icmp ult i32 [[TMP16]], [[ADD7]] // CHECK1-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP19]], 1 // CHECK1-NEXT: [[ADD9:%.*]] = add i32 [[TMP18]], [[MUL]] // CHECK1-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1 // CHECK1-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP21]] to i32 // CHECK1-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV12:%.*]] = trunc i32 [[ADD11]] to i16 // CHECK1-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2 -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP22]] to i32 // CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV13]], 1 // CHECK1-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 // CHECK1-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK1-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4 // 
CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD17:%.*]] = add i32 [[TMP24]], 1 // CHECK1-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -1866,7 +1866,7 @@ // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) // CHECK1-NEXT: br label [[OMP_PRECOND_END]] // CHECK1: omp.precond.end: @@ -1885,12 +1885,12 @@ // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..18, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -1921,49 +1921,49 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK1-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 // CHECK1-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: 
[[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK1-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1 // CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -2026,28 +2026,28 @@ // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11:![0-9]+]] // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]] // CHECK3-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8 // CHECK3-NEXT: store i32 [[TMP3]], ptr [[__VLA_EXPR1]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP7:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP7]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP9]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP8]], ptr [[TMP13]], align 4 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -2069,18 +2069,18 @@ // CHECK3-NEXT: 
[[TMP22:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP25:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP25:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP25]], ptr [[TMP24]], align 4 // CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP27]], ptr [[TMP26]], align 4 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP29]], ptr [[TMP28]], align 4 // CHECK3-NEXT: [[TMP30:%.*]] = call ptr @__kmpc_omp_target_task_alloc(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 72, i32 12, ptr @.omp_task_entry., i64 -1) // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP30]], i32 0, i32 0 // CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP31]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP33:%.*]] = load ptr, ptr [[TMP32]], align 4 +// CHECK3-NEXT: [[TMP33:%.*]] = load ptr, ptr [[TMP32]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP33]], ptr align 4 [[AGG_CAPTURED]], i32 12, i1 false) // CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP30]], i32 0, i32 1 // CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP34]], i32 0, i32 0 @@ -2090,16 +2090,16 @@ // CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP34]], i32 0, i32 2 // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP37]], ptr align 4 [[TMP23]], i32 12, i1 false) // CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP34]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP39:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP39:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP39]], ptr [[TMP38]], align 4 // CHECK3-NEXT: [[TMP40:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP0]], ptr [[TMP30]]) -// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP41]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l107(i32 [[TMP42]]) #[[ATTR3:[0-9]+]] -// CHECK3-NEXT: [[TMP43:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP43:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP43]], ptr [[AA_CASTED4]], align 2 -// CHECK3-NEXT: [[TMP44:%.*]] = load i32, ptr [[AA_CASTED4]], align 4 +// 
CHECK3-NEXT: [[TMP44:%.*]] = load i32, ptr [[AA_CASTED4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP44]], ptr [[TMP45]], align 4 // CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 @@ -2134,13 +2134,13 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l113(i32 [[TMP44]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK3: omp_offload.cont: -// CHECK3-NEXT: [[TMP61:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP61:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP61]], ptr [[A_CASTED8]], align 4 -// CHECK3-NEXT: [[TMP62:%.*]] = load i32, ptr [[A_CASTED8]], align 4 -// CHECK3-NEXT: [[TMP63:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP62:%.*]] = load i32, ptr [[A_CASTED8]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP63:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP63]], ptr [[AA_CASTED9]], align 2 -// CHECK3-NEXT: [[TMP64:%.*]] = load i32, ptr [[AA_CASTED9]], align 4 -// CHECK3-NEXT: [[TMP65:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP64:%.*]] = load i32, ptr [[AA_CASTED9]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP65:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP65]], 10 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -2189,15 +2189,15 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l120(i32 [[TMP62]], i32 [[TMP64]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP85:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP85:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP85]], ptr [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK3-NEXT: [[TMP86:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP86:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP86]], ptr [[A_CASTED18]], align 4 -// CHECK3-NEXT: [[TMP87:%.*]] = load i32, ptr [[A_CASTED18]], align 4 -// CHECK3-NEXT: [[TMP88:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK3-NEXT: [[TMP87:%.*]] = load i32, ptr [[A_CASTED18]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP88:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_17]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP88]], ptr [[DOTCAPTURE_EXPR__CASTED19]], align 4 -// CHECK3-NEXT: [[TMP89:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED19]], align 4 -// CHECK3-NEXT: [[TMP90:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP89:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED19]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP90:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP20:%.*]] = icmp sgt i32 [[TMP90]], 20 // CHECK3-NEXT: br i1 [[CMP20]], label [[OMP_IF_THEN21:%.*]], label [[OMP_IF_ELSE29:%.*]] // CHECK3: omp_if.then21: @@ -2305,7 +2305,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i32 [[TMP87]], ptr [[B]], i32 [[TMP1]], ptr [[VLA]], ptr [[C]], i32 5, i32 [[TMP3]], ptr [[VLA1]], ptr [[D]], i32 [[TMP89]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_IF_END30]] // CHECK3: 
omp_if.end30: -// CHECK3-NEXT: [[TMP142:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP142:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP143:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP143]]) // CHECK3-NEXT: ret i32 [[TMP142]] @@ -2322,12 +2322,12 @@ // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., i32 [[TMP4]]) // CHECK3-NEXT: ret void // @@ -2353,29 +2353,29 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef 
[[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -2383,7 +2383,7 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -2444,65 +2444,65 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !22 +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META14:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK3-NEXT: call void 
@llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !23 // CHECK3-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR3]] -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !22 -// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !22 -// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !22 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !23 +// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !23 +// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !22 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK3-NEXT: store i32 3, ptr [[TMP20]], align 4, !noalias !22 +// CHECK3-NEXT: store i32 3, ptr [[TMP20]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP13]], ptr [[TMP21]], align 4, !noalias !22 +// CHECK3-NEXT: store ptr [[TMP13]], ptr [[TMP21]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP14]], ptr [[TMP22]], align 4, !noalias !22 +// CHECK3-NEXT: store ptr [[TMP14]], ptr [[TMP22]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK3-NEXT: store ptr [[TMP15]], ptr [[TMP23]], align 4, !noalias !22 +// 
CHECK3-NEXT: store ptr [[TMP15]], ptr [[TMP23]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP24]], align 4, !noalias !22 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP24]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4, !noalias !22 +// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP26]], align 4, !noalias !22 +// CHECK3-NEXT: store ptr null, ptr [[TMP26]], align 4, !noalias !23 // CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK3-NEXT: store i64 10, ptr [[TMP27]], align 8, !noalias !22 +// CHECK3-NEXT: store i64 10, ptr [[TMP27]], align 8, !noalias !23 // CHECK3-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB2]], i64 -1, i32 [[TMP18]], i32 [[TMP19]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK3-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 // CHECK3-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK3: omp_offload.failed.i: -// CHECK3-NEXT: [[TMP30:%.*]] = load i16, ptr [[TMP12]], align 2 -// CHECK3-NEXT: store i16 [[TMP30]], ptr [[AA_CASTED_I]], align 2, !noalias !22 -// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[AA_CASTED_I]], align 4, !noalias !22 -// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK3-NEXT: store i32 [[TMP32]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !22 -// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !22 -// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK3-NEXT: store i32 [[TMP34]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !22 -// CHECK3-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !22 +// CHECK3-NEXT: [[TMP30:%.*]] = load i16, ptr [[TMP12]], align 2, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: store i16 [[TMP30]], ptr [[AA_CASTED_I]], align 2, !noalias !23 +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[AA_CASTED_I]], align 4, !noalias !23, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: store i32 [[TMP32]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !23 +// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !23, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: store i32 [[TMP34]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !23 +// CHECK3-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !23, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103(i32 [[TMP31]], i32 [[TMP33]], i32 [[TMP35]]) #[[ATTR3]] // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK3: .omp_outlined..1.exit: @@ -2515,9 +2515,9 @@ // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca 
i32, align 4 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..2, i32 [[TMP1]]) // CHECK3-NEXT: ret void // @@ -2543,40 +2543,40 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK3-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: 
omp.inner.for.inc: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -2593,9 +2593,9 @@ // CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..3, i32 [[TMP1]]) // CHECK3-NEXT: ret void // @@ -2621,33 +2621,33 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// 
CHECK3-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 @@ -2656,7 +2656,7 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -2676,12 +2676,12 @@ // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..6, i32 [[TMP1]], i32 [[TMP3]]) // CHECK3-NEXT: ret void // @@ -2709,36 +2709,36 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK3-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 @@ -2747,7 +2747,7 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1 // 
CHECK3-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -2784,19 +2784,19 @@ // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..9, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i32 [[TMP11]]) // CHECK3-NEXT: ret void // @@ -2836,105 +2836,105 @@ // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK3: omp.dispatch.cond: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 9 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK3-NEXT: br i1 
[[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]] -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK3-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP23]] -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK3-NEXT: store i32 [[ADD7]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: store i32 [[ADD7]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = fpext float [[TMP20]] to double // CHECK3-NEXT: [[ADD8:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK3-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float -// CHECK3-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK3-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK3-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK3-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK3-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float -// CHECK3-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK3-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK3-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i32 0, i32 2 -// CHECK3-NEXT: 
[[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 -// CHECK3-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK3-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK3-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP23]] // CHECK3-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i32 3 -// CHECK3-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 -// CHECK3-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK3-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK3-NEXT: store i64 [[ADD20]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: store i64 [[ADD20]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK3-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK3-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK3-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 -// CHECK3-NEXT: store i8 [[CONV23]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: store i8 [[CONV23]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 -// CHECK3-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK3: omp.dispatch.inc: -// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: 
[[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK3-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK3-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] @@ -2951,27 +2951,27 @@ // CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK3-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK3-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK3-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP8]] // // @@ -2992,19 +2992,19 @@ // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// 
CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -3082,9 +3082,9 @@ // CHECK3-NEXT: [[TMP40:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP40]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK3-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2 +// CHECK3-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP41]] to i32 -// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4 +// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP42]] // CHECK3-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) @@ -3114,19 +3114,19 @@ // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK3-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[N_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP2]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP4]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load 
i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i8 [[TMP6]], ptr [[AAA_CASTED]], align 1 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 50 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -3162,19 +3162,19 @@ // CHECK3-NEXT: store ptr null, ptr [[TMP23]], align 4 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP26]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP27]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[SUB:%.*]] = sub i32 [[TMP28]], [[TMP29]] // CHECK3-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK3-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK3-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD5:%.*]] = add i32 [[TMP30]], 1 // CHECK3-NEXT: [[TMP31:%.*]] = zext i32 [[ADD5]] to i64 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -3208,7 +3208,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], i32 [[TMP7]], ptr [[B]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP43]] // // @@ -3228,13 +3228,13 @@ // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: 
store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -3289,7 +3289,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183(i32 [[TMP1]], i32 [[TMP3]], ptr [[B]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP27]] // // @@ -3307,13 +3307,13 @@ // CHECK3-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..12, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]]) // CHECK3-NEXT: ret void // @@ -3342,48 +3342,48 @@ // CHECK3-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// 
CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK3-NEXT: [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK3-NEXT: store double [[ADD4]], ptr [[A]], align 4 // CHECK3-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK3-NEXT: store double [[INC]], ptr [[A5]], align 4 // CHECK3-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 @@ -3395,7 +3395,7 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1 // CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -3424,18 +3424,18 @@ // CHECK3-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[N_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP3]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP5]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i8 [[TMP7]], ptr [[AAA_CASTED]], align 1 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..15, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -3469,83 +3469,83 @@ // CHECK3-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]] // CHECK3-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK3-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK3-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK3: omp.precond.then: // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP10]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP6:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]] // CHECK3-NEXT: br i1 
[[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD7:%.*]] = add i32 [[TMP17]], 1 // CHECK3-NEXT: [[CMP8:%.*]] = icmp ult i32 [[TMP16]], [[ADD7]] // CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul i32 [[TMP19]], 1 // CHECK3-NEXT: [[ADD9:%.*]] = add i32 [[TMP18]], [[MUL]] // CHECK3-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4 -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1 // CHECK3-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP21]] to i32 // CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV12:%.*]] = trunc i32 [[ADD11]] to i16 // CHECK3-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2 -// CHECK3-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK3-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV13:%.*]] = sext i8 [[TMP22]] to i32 // CHECK3-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV13]], 1 // CHECK3-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 // CHECK3-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK3-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4 // 
CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD17:%.*]] = add i32 [[TMP24]], 1 // CHECK3-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -3553,7 +3553,7 @@ // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) // CHECK3-NEXT: br label [[OMP_PRECOND_END]] // CHECK3: omp.precond.end: @@ -3572,12 +3572,12 @@ // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..18, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -3608,49 +3608,49 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK3-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 // CHECK3-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: 
[[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK3-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1 // CHECK3-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -3679,12 +3679,12 @@ // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11:![0-9]+]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., i64 [[TMP4]]) // CHECK9-NEXT: ret void // @@ -3710,29 +3710,29 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load 
i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -3740,7 +3740,7 @@ // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK9-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -3757,9 +3757,9 @@ // CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..1, i64 [[TMP1]]) // CHECK9-NEXT: ret void // @@ -3785,33 +3785,33 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 @@ -3820,7 +3820,7 @@ // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK9-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -3840,12 +3840,12 @@ // CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], 
align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i64 [[TMP1]], i64 [[TMP3]]) // CHECK9-NEXT: ret void // @@ -3873,36 +3873,36 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef 
[[NOUNDEF11]] // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK9-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 @@ -3911,7 +3911,7 @@ // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK9-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -3948,19 +3948,19 @@ // CHECK9-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..3, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i64 [[TMP11]]) // CHECK9-NEXT: ret void // @@ -4000,105 +4000,105 @@ // CHECK9-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK9-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK9: omp.dispatch.cond: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 9 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK9-NEXT: br i1 
[[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK9: omp.dispatch.body: // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]] -// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK9-NEXT: store i32 [[ADD7]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: store i32 [[ADD7]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP14]] // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV:%.*]] = fpext float [[TMP20]] to double // CHECK9-NEXT: [[ADD8:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK9-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float -// CHECK9-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP14]] // CHECK9-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK9-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK9-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK9-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float -// CHECK9-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP14]] // CHECK9-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK9-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i64 0, i64 2 -// CHECK9-NEXT: 
[[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 -// CHECK9-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP14]] // CHECK9-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK9-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP23]] // CHECK9-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i64 3 -// CHECK9-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 -// CHECK9-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP14]] // CHECK9-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK9-NEXT: store i64 [[ADD20]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: store i64 [[ADD20]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP14]] // CHECK9-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK9-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK9-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 -// CHECK9-NEXT: store i8 [[CONV23]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: store i8 [[CONV23]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP14]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 -// CHECK9-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK9: omp.dispatch.inc: -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: 
[[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK9-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK9-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND]] @@ -4125,18 +4125,18 @@ // CHECK9-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP1]], ptr [[N_CASTED]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP3]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i16 [[TMP5]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i8 [[TMP7]], ptr [[AAA_CASTED]], align 1 -// CHECK9-NEXT: [[TMP8:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP8:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..4, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], ptr [[TMP0]]) // CHECK9-NEXT: ret void // @@ -4170,83 +4170,83 @@ // CHECK9-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]] // CHECK9-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK9-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK9-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK9: omp.precond.then: // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP10]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP6:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]] // CHECK9-NEXT: br i1 
[[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD7:%.*]] = add i32 [[TMP17]], 1 // CHECK9-NEXT: [[CMP8:%.*]] = icmp ult i32 [[TMP16]], [[ADD7]] // CHECK9-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL:%.*]] = mul i32 [[TMP19]], 1 // CHECK9-NEXT: [[ADD9:%.*]] = add i32 [[TMP18]], [[MUL]] // CHECK9-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4 -// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1 // CHECK9-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4 -// CHECK9-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP21]] to i32 // CHECK9-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV12:%.*]] = trunc i32 [[ADD11]] to i16 // CHECK9-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2 -// CHECK9-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK9-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV13:%.*]] = sext i8 [[TMP22]] to i32 // CHECK9-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV13]], 1 // CHECK9-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 // CHECK9-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK9-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4 // 
CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD17:%.*]] = add i32 [[TMP24]], 1 // CHECK9-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -4254,7 +4254,7 @@ // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK9-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) // CHECK9-NEXT: br label [[OMP_PRECOND_END]] // CHECK9: omp.precond.end: @@ -4275,13 +4275,13 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 +// CHECK9-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..5, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]]) // CHECK9-NEXT: ret void // @@ -4310,48 +4310,48 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK9-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// 
CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK9-NEXT: [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK9-NEXT: store double [[ADD4]], ptr [[A]], align 8 // CHECK9-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK9-NEXT: store double [[INC]], ptr [[A5]], align 8 // CHECK9-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 @@ -4363,7 +4363,7 @@ // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1 // CHECK9-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -4386,12 +4386,12 @@ // CHECK9-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..6, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK9-NEXT: ret void // @@ -4422,49 +4422,49 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK9-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK9-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK9-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 // CHECK9-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: 
[[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK9-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1 // CHECK9-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -4486,12 +4486,12 @@ // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF12:![0-9]+]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., i32 [[TMP4]]) // CHECK11-NEXT: ret void // @@ -4517,29 +4517,29 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -4547,7 +4547,7 @@ // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK11-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -4564,9 +4564,9 @@ // CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr 
[[AA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..1, i32 [[TMP1]]) // CHECK11-NEXT: ret void // @@ -4592,33 +4592,33 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 @@ -4627,7 +4627,7 @@ // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK11-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br 
label [[OMP_INNER_FOR_COND]] @@ -4647,12 +4647,12 @@ // CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i32 [[TMP1]], i32 [[TMP3]]) // CHECK11-NEXT: ret void // @@ -4680,36 +4680,36 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 
[[TMP7]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK11-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32 // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 @@ -4718,7 +4718,7 @@ // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK11-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -4755,19 +4755,19 @@ // CHECK11-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP10]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 10, ptr @.omp_outlined..3, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]], i32 [[TMP11]]) // CHECK11-NEXT: ret void // @@ -4807,105 +4807,105 @@ // CHECK11-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP10]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK11-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK11: omp.dispatch.cond: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 9 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle 
i32 [[TMP14]], [[TMP15]] // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK11: omp.dispatch.body: // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]] -// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK11-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP14]] -// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK11-NEXT: store i32 [[ADD7]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: store i32 [[ADD7]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV:%.*]] = fpext float [[TMP20]] to double // CHECK11-NEXT: [[ADD8:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK11-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float -// CHECK11-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: store float [[CONV9]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK11-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK11-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK11-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK11-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float -// CHECK11-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: store float [[CONV13]], ptr [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK11-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK11-NEXT: [[ARRAYIDX15:%.*]] = 
getelementptr inbounds [10 x double], ptr [[ARRAYIDX14]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP22:%.*]] = load double, ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 -// CHECK11-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: store double [[ADD16]], ptr [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP15]] // CHECK11-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK11-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP23]] // CHECK11-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX17]], i32 3 -// CHECK11-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP24:%.*]] = load double, ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 -// CHECK11-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: store double [[ADD19]], ptr [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP15]] // CHECK11-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK11-NEXT: store i64 [[ADD20]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: store i64 [[ADD20]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK11-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP26:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK11-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK11-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 -// CHECK11-NEXT: store i8 [[CONV23]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: store i8 [[CONV23]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 -// CHECK11-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK11: omp.dispatch.inc: -// CHECK11-NEXT: [[TMP28:%.*]] = 
load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK11-NEXT: store i32 [[ADD25]], ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK11-NEXT: store i32 [[ADD26]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND]] @@ -4932,18 +4932,18 @@ // CHECK11-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP1]], ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP3]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i16 [[TMP5]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i8 [[TMP7]], ptr [[AAA_CASTED]], align 1 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..4, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], ptr [[TMP0]]) // CHECK11-NEXT: ret void // @@ -4977,83 +4977,83 @@ // CHECK11-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]] // CHECK11-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK11-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK11-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK11: omp.precond.then: // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP10]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP6:%.*]] = icmp ugt i32 
[[TMP11]], [[TMP12]] // CHECK11-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD7:%.*]] = add i32 [[TMP17]], 1 // CHECK11-NEXT: [[CMP8:%.*]] = icmp ult i32 [[TMP16]], [[ADD7]] // CHECK11-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL:%.*]] = mul i32 [[TMP19]], 1 // CHECK11-NEXT: [[ADD9:%.*]] = add i32 [[TMP18]], [[MUL]] // CHECK11-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1 // CHECK11-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP21]] to i32 // CHECK11-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV12:%.*]] = trunc i32 [[ADD11]] to i16 // CHECK11-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2 -// CHECK11-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK11-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV13:%.*]] = sext i8 [[TMP22]] to i32 // CHECK11-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV13]], 1 // CHECK11-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 // CHECK11-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: 
[[ADD16:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK11-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD17:%.*]] = add i32 [[TMP24]], 1 // CHECK11-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -5061,7 +5061,7 @@ // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) // CHECK11-NEXT: br label [[OMP_PRECOND_END]] // CHECK11: omp.precond.end: @@ -5082,13 +5082,13 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..5, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]]) // CHECK11-NEXT: ret void // @@ -5117,48 +5117,48 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK11-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: 
[[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK11-NEXT: [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK11-NEXT: store double [[ADD4]], ptr [[A]], align 4 // CHECK11-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK11-NEXT: store double [[INC]], ptr [[A5]], align 4 // CHECK11-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 @@ -5170,7 +5170,7 @@ // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1 // CHECK11-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -5193,12 +5193,12 @@ // CHECK11-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..6, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK11-NEXT: ret void // @@ -5229,49 +5229,49 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK11-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK11-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 // CHECK11-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, 
!noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK11-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1 // CHECK11-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_reduction_task_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_reduction_task_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_parallel_for_reduction_task_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_reduction_task_codegen.cpp @@ -42,7 +42,7 @@ // CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l14(ptr [[ARGC_ADDR]], ptr [[TMP0]]) #[[ATTR6:[0-9]+]] // CHECK1-NEXT: ret i32 0 // @@ -55,7 +55,7 @@ // CHECK1-NEXT: store ptr [[ARGC]], ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1:[0-9]+]], i32 2, ptr @.omp_outlined., ptr [[TMP0]], ptr [[TMP1]]) // CHECK1-NEXT: ret void // @@ -90,14 +90,14 @@ // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP1]], i64 0 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 0 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64 // CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP4]] -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds ptr, ptr [[TMP5]], i64 9 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[ARRAYIDX3]], align 8 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[LB_ADD_LEN]] @@ -133,178 +133,178 @@ // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 0 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 -// CHECK1-NEXT: store i64 4, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 -// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP30]], i8 0, i64 4, i1 false) +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 +// CHECK1-NEXT: store i64 4, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr 
[[DOTRD_INPUT_GEP_]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 +// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 4, i1 false) // CHECK1-NEXT: [[DOTRD_INPUT_GEP_6:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds ptr, ptr [[TMP33]], i64 0 -// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARRAYIDX7]], align 8 -// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP34]], i64 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP36]] -// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr [[TMP37]], i64 9 -// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[ARRAYIDX10]], align 8 -// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[TMP38]], i64 [[LB_ADD_LEN9]] -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[ARRAYIDX11]] to i64 -// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[ARRAYIDX8]] to i64 -// CHECK1-NEXT: [[TMP42:%.*]] = sub i64 [[TMP40]], [[TMP41]] -// CHECK1-NEXT: [[TMP43:%.*]] = sdiv exact i64 [[TMP42]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP44:%.*]] = add nuw i64 [[TMP43]], 1 -// CHECK1-NEXT: [[TMP45:%.*]] = mul nuw i64 [[TMP44]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 2 -// CHECK1-NEXT: store i64 [[TMP45]], ptr [[TMP46]], align 8 -// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP47]], align 8 -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP48]], align 8 -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP49]], align 8 -// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 6 -// CHECK1-NEXT: store i32 1, ptr [[TMP50]], align 8 -// CHECK1-NEXT: [[TMP51:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[TMP51]], align 4 -// CHECK1-NEXT: [[TMP54:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP52]], i32 1, i32 2, ptr 
[[DOTRD_INPUT_]]) -// CHECK1-NEXT: store ptr [[TMP54]], ptr [[DOTTASK_RED_]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds ptr, ptr [[TMP30]], i64 0 +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[ARRAYIDX7]], align 8 +// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP31]], i64 0 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP33:%.*]] = sext i32 [[TMP32]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP33]] +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr [[TMP34]], i64 9 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[ARRAYIDX10]], align 8 +// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[TMP35]], i64 [[LB_ADD_LEN9]] +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP37:%.*]] = ptrtoint ptr [[ARRAYIDX11]] to i64 +// CHECK1-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[ARRAYIDX8]] to i64 +// CHECK1-NEXT: [[TMP39:%.*]] = sub i64 [[TMP37]], [[TMP38]] +// CHECK1-NEXT: [[TMP40:%.*]] = sdiv exact i64 [[TMP39]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP41:%.*]] = add nuw i64 [[TMP40]], 1 +// CHECK1-NEXT: [[TMP42:%.*]] = mul nuw i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 2 +// CHECK1-NEXT: store i64 [[TMP42]], ptr [[TMP43]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP44]], align 8 +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 6 +// CHECK1-NEXT: store i32 1, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP48]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP50:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP49]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) +// CHECK1-NEXT: store ptr [[TMP50]], ptr [[DOTTASK_RED_]], align 8 // CHECK1-NEXT: store i64 0, ptr [[DOTOMP_COMB_LB]], align 8 // CHECK1-NEXT: store i64 9, ptr [[DOTOMP_COMB_UB]], align 8 // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP55:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[TMP55]], align 4 
-// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB2:[0-9]+]], i32 [[TMP56]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP57]], 9 +// CHECK1-NEXT: [[TMP51:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[TMP51]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB2:[0-9]+]], i32 [[TMP52]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) +// CHECK1-NEXT: [[TMP53:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP53]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 +// CHECK1-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP58]], [[COND_FALSE]] ] +// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP54]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8 -// CHECK1-NEXT: store i64 [[TMP59]], ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i64 [[TMP55]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP61:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP60]], [[TMP61]] +// CHECK1-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP56]], [[TMP57]] // CHECK1-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP62:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8 -// CHECK1-NEXT: [[TMP63:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK1-NEXT: [[TMP64:%.*]] = load ptr, ptr [[TMP]], align 8 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 4, ptr @.omp_outlined..3, i64 [[TMP62]], i64 [[TMP63]], ptr [[ARGC1]], ptr [[TMP64]]) +// CHECK1-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8 +// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 +// CHECK1-NEXT: [[TMP60:%.*]] = load ptr, ptr [[TMP]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 4, ptr @.omp_outlined..3, i64 [[TMP58]], i64 [[TMP59]], ptr [[ARGC1]], ptr [[TMP60]]) // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP65:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP66:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP65]], [[TMP66]] +// CHECK1-NEXT: [[TMP61:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP62:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP61]], [[TMP62]] // CHECK1-NEXT: store i64 [[ADD]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: -// CHECK1-NEXT: [[TMP67:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP68:%.*]] = load i32, ptr [[TMP67]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP68]]) -// CHECK1-NEXT: [[TMP69:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP70:%.*]] = load i32, ptr [[TMP69]], align 4 -// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP70]], i32 1) -// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP71]], align 8 -// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP73]], align 8 -// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP75:%.*]] = inttoptr i64 [[TMP11]] to ptr -// CHECK1-NEXT: store ptr [[TMP75]], ptr [[TMP74]], align 8 -// CHECK1-NEXT: [[TMP76:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP77:%.*]] = load i32, ptr [[TMP76]], align 4 -// CHECK1-NEXT: [[TMP79:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB4:[0-9]+]], i32 [[TMP77]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.9, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP79]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP63:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP64:%.*]] = load i32, ptr [[TMP63]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP64]]) +// CHECK1-NEXT: [[TMP65:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP66:%.*]] = load i32, ptr [[TMP65]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP66]], i32 1) +// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP67]], align 8 +// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP68]], align 8 +// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 +// CHECK1-NEXT: [[TMP70:%.*]] = inttoptr i64 [[TMP11]] to ptr +// CHECK1-NEXT: store ptr [[TMP70]], ptr [[TMP69]], align 8 +// CHECK1-NEXT: [[TMP71:%.*]] = load 
ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP72:%.*]] = load i32, ptr [[TMP71]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP73:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB4:[0-9]+]], i32 [[TMP72]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.9, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP73]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP80:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP80]], [[TMP81]] +// CHECK1-NEXT: [[TMP74:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP75:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP74]], [[TMP75]] // CHECK1-NEXT: store i32 [[ADD14]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP82]] +// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP76]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE21:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST15:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT19:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP83:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST15]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP83]] to i32 -// CHECK1-NEXT: [[TMP84:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV16:%.*]] = sext i8 [[TMP84]] to i32 +// CHECK1-NEXT: [[TMP77:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST15]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP77]] to i32 +// CHECK1-NEXT: [[TMP78:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV16:%.*]] = sext i8 [[TMP78]] to i32 // CHECK1-NEXT: [[ADD17:%.*]] = add nsw i32 [[CONV]], [[CONV16]] // CHECK1-NEXT: [[CONV18:%.*]] = trunc i32 [[ADD17]] to i8 // CHECK1-NEXT: store i8 [[CONV18]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST15]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT19]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST15]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT19]], [[TMP82]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT19]], [[TMP76]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE20]], label [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done21: -// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB4]], i32 [[TMP77]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB4]], i32 [[TMP72]], ptr 
@.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP86:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP85]] monotonic, align 4 -// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY22:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP87]] +// CHECK1-NEXT: [[TMP79:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP80:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP79]] monotonic, align 4 +// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY22:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP81]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY22]], label [[OMP_ARRAYCPY_DONE35:%.*]], label [[OMP_ARRAYCPY_BODY23:%.*]] // CHECK1: omp.arraycpy.body23: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST24:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT33:%.*]], [[ATOMIC_EXIT:%.*]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST25:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT32:%.*]], [[ATOMIC_EXIT]] ] -// CHECK1-NEXT: [[TMP88:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST24]], align 1 -// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP88]] to i32 +// CHECK1-NEXT: [[TMP82:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST24]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP82]] to i32 // CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST25]] monotonic, align 1 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] // CHECK1: atomic_cont: -// CHECK1-NEXT: [[TMP89:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY23]] ], [ [[TMP94:%.*]], [[ATOMIC_CONT]] ] -// CHECK1-NEXT: store i8 [[TMP89]], ptr [[_TMP27]], align 1 -// CHECK1-NEXT: [[TMP90:%.*]] = load i8, ptr [[_TMP27]], align 1 -// CHECK1-NEXT: [[CONV28:%.*]] = sext i8 [[TMP90]] to i32 -// CHECK1-NEXT: [[TMP91:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST24]], align 1 -// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP91]] to i32 +// CHECK1-NEXT: [[TMP83:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY23]] ], [ [[TMP88:%.*]], [[ATOMIC_CONT]] ] +// CHECK1-NEXT: store i8 [[TMP83]], ptr [[_TMP27]], align 1 +// CHECK1-NEXT: [[TMP84:%.*]] = load i8, ptr [[_TMP27]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV28:%.*]] = sext i8 [[TMP84]] to i32 +// CHECK1-NEXT: [[TMP85:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST24]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP85]] to i32 // CHECK1-NEXT: [[ADD30:%.*]] = add nsw i32 [[CONV28]], [[CONV29]] // CHECK1-NEXT: [[CONV31:%.*]] = trunc i32 [[ADD30]] to i8 // CHECK1-NEXT: store i8 [[CONV31]], ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP92:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP93:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST25]], i8 [[TMP89]], i8 [[TMP92]] monotonic monotonic, align 1 -// CHECK1-NEXT: [[TMP94]] = extractvalue { i8, i1 } [[TMP93]], 0 -// CHECK1-NEXT: [[TMP95:%.*]] = extractvalue { i8, i1 } [[TMP93]], 1 -// CHECK1-NEXT: br i1 [[TMP95]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] +// CHECK1-NEXT: [[TMP86:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 +// CHECK1-NEXT: [[TMP87:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST25]], i8 [[TMP83]], i8 [[TMP86]] monotonic 
monotonic, align 1 +// CHECK1-NEXT: [[TMP88]] = extractvalue { i8, i1 } [[TMP87]], 0 +// CHECK1-NEXT: [[TMP89:%.*]] = extractvalue { i8, i1 } [[TMP87]], 1 +// CHECK1-NEXT: br i1 [[TMP89]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] // CHECK1: atomic_exit: // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT32]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST25]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT33]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST24]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE34:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT32]], [[TMP87]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE34:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT32]], [[TMP81]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE34]], label [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_BODY23]] // CHECK1: omp.arraycpy.done35: // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: -// CHECK1-NEXT: [[TMP96:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP96]]) +// CHECK1-NEXT: [[TMP90:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP90]]) // CHECK1-NEXT: ret void // // @@ -315,8 +315,8 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -327,12 +327,12 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP5]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -345,7 +345,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP4]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP2]], [[TMP5]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -367,7 +367,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: 
[[TMP2:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]] @@ -376,9 +376,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP8]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8 @@ -427,21 +427,21 @@ // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK1-NEXT: store i64 9, ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i64 [[TMP1]], ptr [[DOTOMP_LB]], align 8 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[DOTOMP_UB]], align 8 // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store i32 0, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP3]], i64 0 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 0 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64 // CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP6]] -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds ptr, ptr [[TMP7]], i64 9 // CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYIDX3]], align 8 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i64 [[LB_ADD_LEN]] @@ -477,198 +477,198 @@ // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t.0], ptr [[DOTRD_INPUT_]], i64 0, i64 0 // CHECK1-NEXT: [[TMP24:%.*]] = 
getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0:%.*]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 -// CHECK1-NEXT: store i64 4, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..4, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb..5, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 -// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP32]], i8 0, i64 4, i1 false) +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 +// CHECK1-NEXT: store i64 4, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..4, ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..5, ptr [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 +// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP30]], i8 0, i64 4, i1 false) // CHECK1-NEXT: [[DOTRD_INPUT_GEP_7:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t.0], ptr [[DOTRD_INPUT_]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds ptr, ptr [[TMP35]], i64 0 -// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[ARRAYIDX8]], align 8 -// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, ptr [[TMP36]], i64 0 -// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP38:%.*]] = sext i32 [[TMP37]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP38]] -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds ptr, ptr [[TMP39]], i64 9 -// CHECK1-NEXT: [[TMP40:%.*]] = load ptr, ptr [[ARRAYIDX11]], align 8 -// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, 
ptr [[TMP40]], i64 [[LB_ADD_LEN10]] -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARRAYIDX9]], ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP42:%.*]] = ptrtoint ptr [[ARRAYIDX12]] to i64 -// CHECK1-NEXT: [[TMP43:%.*]] = ptrtoint ptr [[ARRAYIDX9]] to i64 -// CHECK1-NEXT: [[TMP44:%.*]] = sub i64 [[TMP42]], [[TMP43]] -// CHECK1-NEXT: [[TMP45:%.*]] = sdiv exact i64 [[TMP44]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP46:%.*]] = add nuw i64 [[TMP45]], 1 -// CHECK1-NEXT: [[TMP47:%.*]] = mul nuw i64 [[TMP46]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 2 -// CHECK1-NEXT: store i64 [[TMP47]], ptr [[TMP48]], align 8 -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..6, ptr [[TMP49]], align 8 -// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP50]], align 8 -// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb..7, ptr [[TMP51]], align 8 -// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 6 -// CHECK1-NEXT: store i32 1, ptr [[TMP52]], align 8 +// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds ptr, ptr [[TMP32]], i64 0 +// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[ARRAYIDX8]], align 8 +// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, ptr [[TMP33]], i64 0 +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = sext i32 [[TMP34]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP35]] +// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds ptr, ptr [[TMP36]], i64 9 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[ARRAYIDX11]], align 8 +// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, ptr [[TMP37]], i64 [[LB_ADD_LEN10]] +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP31]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARRAYIDX9]], ptr [[TMP38]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[ARRAYIDX12]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[ARRAYIDX9]] to i64 +// CHECK1-NEXT: [[TMP41:%.*]] = sub i64 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: [[TMP42:%.*]] = sdiv exact i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = add nuw i64 [[TMP42]], 1 +// CHECK1-NEXT: [[TMP44:%.*]] = mul nuw i64 [[TMP43]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 2 +// CHECK1-NEXT: store i64 [[TMP44]], ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..6, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..7, ptr [[TMP48]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 6 +// CHECK1-NEXT: store i32 1, ptr [[TMP49]], align 8 +// CHECK1-NEXT: [[TMP50:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP51:%.*]] = load i32, ptr [[TMP50]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP52:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP51]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) +// CHECK1-NEXT: store ptr [[TMP52]], ptr [[DOTTASK_RED_]], align 8 // CHECK1-NEXT: [[TMP53:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[TMP53]], align 4 -// CHECK1-NEXT: [[TMP56:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP54]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) -// CHECK1-NEXT: store ptr [[TMP56]], ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: [[TMP57:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP58:%.*]] = load i32, ptr [[TMP57]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB3:[0-9]+]], i32 [[TMP58]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP59]], 9 +// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[TMP53]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB3:[0-9]+]], i32 [[TMP54]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) +// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP55]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP60]], [[COND_FALSE]] ] +// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP56]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP61:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 -// CHECK1-NEXT: store i64 [[TMP61]], ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i64 [[TMP57]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// 
CHECK1-NEXT: [[TMP62:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP63:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP62]], [[TMP63]] +// CHECK1-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP58]], [[TMP59]] // CHECK1-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK1: omp.inner.for.cond.cleanup: // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP64:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP64]], 1 +// CHECK1-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP60]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL]] // CHECK1-NEXT: store i64 [[ADD]], ptr [[I]], align 8 -// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP65]], align 8 -// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP66]], align 8 -// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP68:%.*]] = load ptr, ptr [[_TMP5]], align 8 -// CHECK1-NEXT: store ptr [[TMP68]], ptr [[TMP67]], align 8 -// CHECK1-NEXT: [[TMP69:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP70:%.*]] = load i32, ptr [[TMP69]], align 4 -// CHECK1-NEXT: [[TMP71:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP70]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
-// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP71]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP73]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP75:%.*]] = load ptr, ptr [[TMP74]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP75]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) -// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP71]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP77]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP79:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: store ptr [[TMP79]], ptr [[TMP78]], align 8 -// CHECK1-NEXT: [[TMP80:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP80]], align 4 -// CHECK1-NEXT: [[TMP82:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP81]], ptr [[TMP71]]) +// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP61]], align 8 +// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP62]], align 8 +// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP64:%.*]] = load ptr, ptr [[_TMP5]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP64]], ptr [[TMP63]], align 8 +// CHECK1-NEXT: [[TMP65:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP66:%.*]] = load i32, ptr [[TMP65]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP67:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP66]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
+// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP67]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP68]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP70:%.*]] = load ptr, ptr [[TMP69]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP70]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) +// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP67]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP71]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP73:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP73]], ptr [[TMP72]], align 8 +// CHECK1-NEXT: [[TMP74:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP75:%.*]] = load i32, ptr [[TMP74]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP76:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP75]], ptr [[TMP67]]) // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP83:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP83]], 1 +// CHECK1-NEXT: [[TMP77:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP77]], 1 // CHECK1-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: -// CHECK1-NEXT: [[TMP84:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[TMP84]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP85]]) +// CHECK1-NEXT: [[TMP78:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP79:%.*]] = load i32, ptr [[TMP78]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP79]]) +// CHECK1-NEXT: [[TMP80:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP80]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP81]], i32 1) +// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP82]], align 8 +// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP83]], align 8 +// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 +// CHECK1-NEXT: [[TMP85:%.*]] = inttoptr i64 [[TMP13]] to ptr +// CHECK1-NEXT: store ptr [[TMP85]], ptr [[TMP84]], align 8 // CHECK1-NEXT: [[TMP86:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[TMP86]], align 4 -// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP87]], i32 1) -// CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP88]], align 8 -// CHECK1-NEXT: 
[[TMP90:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP90]], align 8 -// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP92:%.*]] = inttoptr i64 [[TMP13]] to ptr -// CHECK1-NEXT: store ptr [[TMP92]], ptr [[TMP91]], align 8 -// CHECK1-NEXT: [[TMP93:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP94:%.*]] = load i32, ptr [[TMP93]], align 4 -// CHECK1-NEXT: [[TMP96:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB4]], i32 [[TMP94]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP96]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[TMP86]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP88:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB4]], i32 [[TMP87]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP88]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP97:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP98:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP97]], [[TMP98]] +// CHECK1-NEXT: [[TMP89:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP90:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP89]], [[TMP90]] // CHECK1-NEXT: store i32 [[ADD15]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP13]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP99]] +// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP13]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP91]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE22:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST16:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT20:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP100:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP100]] to i32 -// CHECK1-NEXT: [[TMP101:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP101]] to i32 +// CHECK1-NEXT: [[TMP92:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP92]] to i32 +// CHECK1-NEXT: [[TMP93:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP93]] to i32 // CHECK1-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV]], [[CONV17]] // CHECK1-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8 // CHECK1-NEXT: store i8 [[CONV19]], ptr 
[[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT20]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP99]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP91]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_DONE22]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done22: -// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB4]], i32 [[TMP94]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB4]], i32 [[TMP87]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP102:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP103:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP102]] monotonic, align 4 -// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP13]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP104]] +// CHECK1-NEXT: [[TMP94:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP95:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP94]] monotonic, align 4 +// CHECK1-NEXT: [[TMP96:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP13]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP96]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY23]], label [[OMP_ARRAYCPY_DONE36:%.*]], label [[OMP_ARRAYCPY_BODY24:%.*]] // CHECK1: omp.arraycpy.body24: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST25:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT34:%.*]], [[ATOMIC_EXIT:%.*]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST26:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT33:%.*]], [[ATOMIC_EXIT]] ] -// CHECK1-NEXT: [[TMP105:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1 -// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP105]] to i32 +// CHECK1-NEXT: [[TMP97:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP97]] to i32 // CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]] monotonic, align 1 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] // CHECK1: atomic_cont: -// CHECK1-NEXT: [[TMP106:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP111:%.*]], [[ATOMIC_CONT]] ] -// CHECK1-NEXT: store i8 [[TMP106]], ptr [[_TMP28]], align 1 -// CHECK1-NEXT: [[TMP107:%.*]] = load i8, ptr [[_TMP28]], align 1 -// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP107]] to i32 -// CHECK1-NEXT: [[TMP108:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1 -// CHECK1-NEXT: [[CONV30:%.*]] = sext i8 [[TMP108]] to i32 +// CHECK1-NEXT: [[TMP98:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP103:%.*]], [[ATOMIC_CONT]] ] +// CHECK1-NEXT: store i8 [[TMP98]], ptr [[_TMP28]], align 1 +// CHECK1-NEXT: [[TMP99:%.*]] = load i8, ptr [[_TMP28]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP99]] to i32 +// CHECK1-NEXT: [[TMP100:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: 
[[CONV30:%.*]] = sext i8 [[TMP100]] to i32 // CHECK1-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV29]], [[CONV30]] // CHECK1-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8 // CHECK1-NEXT: store i8 [[CONV32]], ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP109:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP110:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP106]], i8 [[TMP109]] monotonic monotonic, align 1 -// CHECK1-NEXT: [[TMP111]] = extractvalue { i8, i1 } [[TMP110]], 0 -// CHECK1-NEXT: [[TMP112:%.*]] = extractvalue { i8, i1 } [[TMP110]], 1 -// CHECK1-NEXT: br i1 [[TMP112]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] +// CHECK1-NEXT: [[TMP101:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 +// CHECK1-NEXT: [[TMP102:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP98]], i8 [[TMP101]] monotonic monotonic, align 1 +// CHECK1-NEXT: [[TMP103]] = extractvalue { i8, i1 } [[TMP102]], 0 +// CHECK1-NEXT: [[TMP104:%.*]] = extractvalue { i8, i1 } [[TMP102]], 1 +// CHECK1-NEXT: br i1 [[TMP104]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] // CHECK1: atomic_exit: // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT33]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT34]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP104]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP96]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_DONE36]], label [[OMP_ARRAYCPY_BODY24]] // CHECK1: omp.arraycpy.done36: // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: -// CHECK1-NEXT: [[TMP113:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP113]]) +// CHECK1-NEXT: [[TMP105:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP105]]) // CHECK1-NEXT: ret void // // @@ -679,8 +679,8 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -691,12 +691,12 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP5]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], 
align 4
// CHECK1-NEXT: ret void
//
//
@@ -709,7 +709,7 @@
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}})
-// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8
+// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP4]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP2]], [[TMP5]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
@@ -731,7 +731,7 @@
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}})
-// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8
+// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]]
@@ -740,9 +740,9 @@
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
-// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1
+// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32
-// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1
+// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP8]] to i32
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]]
// CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8
@@ -785,65 +785,65 @@
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
-// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4
+// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]]
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0
-// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
-// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1
-// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
-// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
-// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
-// CHECK1-NEXT: call void
@llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: call void [[TMP13]](ptr [[TMP14]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: [[TMP22:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP19]], ptr [[TMP18]]) -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]] -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP31]], i64 9 -// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 -// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i64 [[LB_ADD_LEN_I]] -// CHECK1-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 -// CHECK1-NEXT: [[TMP34:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP35:%.*]] = sub i64 [[TMP33]], [[TMP34]] -// CHECK1-NEXT: [[TMP36:%.*]] = sdiv exact i64 [[TMP35]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP37:%.*]] = add nuw i64 [[TMP36]], 1 -// CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: store i64 [[TMP37]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !12 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP39]], ptr [[TMP25]]) -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 8 -// 
CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[TMP43]] to i64 -// CHECK1-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP46:%.*]] = sub i64 [[TMP44]], [[TMP45]] -// CHECK1-NEXT: [[TMP47:%.*]] = sdiv exact i64 [[TMP46]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr i8, ptr [[TMP40]], i64 [[TMP47]] -// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP48]], ptr [[TMP4_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP15]], ptr [[TMP14]]) +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP24:%.*]] = sext i32 [[TMP23]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP24]] +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP26]], i64 9 +// CHECK1-NEXT: 
[[TMP27:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 +// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP27]], i64 [[LB_ADD_LEN_I]] +// CHECK1-NEXT: [[TMP28:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 +// CHECK1-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP30:%.*]] = sub i64 [[TMP28]], [[TMP29]] +// CHECK1-NEXT: [[TMP31:%.*]] = sdiv exact i64 [[TMP30]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP32:%.*]] = add nuw i64 [[TMP31]], 1 +// CHECK1-NEXT: [[TMP33:%.*]] = mul nuw i64 [[TMP32]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: store i64 [[TMP32]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !13 +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP34]], ptr [[TMP20]]) +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP37]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[TMP38]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP41:%.*]] = sub i64 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: [[TMP42:%.*]] = sdiv exact i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP35]], i64 [[TMP42]] +// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP43]], ptr [[TMP4_I]], align 8, !noalias !13 // CHECK1-NEXT: ret i32 0 // // @@ -855,38 +855,38 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 // CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// 
CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP15]], i64 [[TMP18]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP15]], [[TMP21]] +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[TMP14]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP11]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE5:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: -// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP15]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP23]] to i32 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP18]] to i32 +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP19]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i8 // CHECK1-NEXT: store i8 [[CONV4]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done5: // CHECK1-NEXT: ret void @@ -900,38 +900,38 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = 
load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 // CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP15]], i64 [[TMP18]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP15]], [[TMP21]] +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[TMP14]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP11]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE5:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: -// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP15]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP23]] to i32 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// 
CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP18]] to i32 +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP19]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i8 // CHECK1-NEXT: store i8 [[CONV4]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done5: // CHECK1-NEXT: ret void diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_collapse_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_collapse_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_collapse_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_collapse_codegen.cpp @@ -121,34 +121,34 @@ // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[A]], ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[A]], ptr [[TMP1]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = 
getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 56088, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 56088, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK1-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28(ptr [[THIS1]]) #[[ATTR2:[0-9]+]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -156,8 +156,8 @@ // CHECK1-NEXT: [[A3:%.*]] 
= getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A3]], i64 0, i64 0 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4 -// CHECK1-NEXT: ret i32 [[TMP18]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF4:![0-9]+]] +// CHECK1-NEXT: ret i32 [[TMP16]] // // // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28 @@ -165,7 +165,7 @@ // CHECK1-NEXT: entry: // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined., ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -188,51 +188,51 @@ // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 // CHECK1-NEXT: store i32 56087, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 56087 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 56087, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], 
align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group !4 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -265,74 +265,74 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: [[TMP2:%.*]] 
= load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV2]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 56087 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 56087, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 456 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], 
!noundef [[NOUNDEF4]] // CHECK1-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP12]], 456 // CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 456 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL5]] // CHECK1-NEXT: [[MUL6:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]] -// CHECK1-NEXT: store i32 [[ADD7]], ptr [[J]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: store i32 [[ADD7]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP9]] // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP14]] to i64 // CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM8]] -// CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX9]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP9]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK1-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -372,34 +372,34 @@ // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr [[A]], ptr [[TMP2]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: 
[[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr [[A]], ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 56088, ptr [[TMP15]], align 8 -// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 56088, ptr [[TMP13]], align 8 +// CHECK3-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK3-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28(ptr [[THIS1]]) #[[ATTR2:[0-9]+]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -407,8 +407,8 @@ // CHECK3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A3]], i32 0, i32 0 // CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4 -// CHECK3-NEXT: ret i32 [[TMP18]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF5:![0-9]+]] +// CHECK3-NEXT: ret i32 [[TMP16]] // // // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28 @@ -416,7 +416,7 @@ // CHECK3-NEXT: entry: // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined., ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -439,49 +439,49 @@ // CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 // CHECK3-NEXT: store i32 56087, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 56087 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 56087, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group !5 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP6]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -514,70 +514,70 @@ // CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4, !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], 
i32 1, i32 1) -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 56087 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 56087, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 456 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP12]], 456 // CHECK3-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL4]] // CHECK3-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK3-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], 
i32 0, i32 [[TMP13]] -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP14]] -// CHECK3-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK3-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -618,51 +618,51 @@ // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 456 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: 
[[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP5]], 456 // CHECK5-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456 // CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL4]] // CHECK5-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK5-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP7]] to i64 // CHECK5-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM7]] -// CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK5-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 123, ptr [[I]], align 4 // CHECK5-NEXT: store i32 456, ptr [[J]], align 4 // CHECK5-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK5-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A10]], i64 0, i64 0 // CHECK5-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX11]], i64 0, i64 0 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX12]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX12]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: ret i32 [[TMP9]] // // @@ -689,49 +689,49 @@ // CHECK7-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr 
[[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 456 // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP5]], 456 // CHECK7-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456 // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL4]] // CHECK7-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK7-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], i32 0, i32 [[TMP6]] -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP7]] -// CHECK7-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, 
!llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 123, ptr [[I]], align 4 // CHECK7-NEXT: store i32 456, ptr [[J]], align 4 // CHECK7-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK7-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A9]], i32 0, i32 0 // CHECK7-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX10]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX11]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX11]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: ret i32 [[TMP9]] // // @@ -753,18 +753,18 @@ // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x ptr], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK9-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK9-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 // CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() // CHECK9-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 @@ -772,100 +772,100 @@ // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 // CHECK9-NEXT: store i64 [[TMP3]], ptr [[__VLA_EXPR1]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[N_CASTED]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[M]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP8]], ptr [[M_CASTED]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[M_CASTED]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[M_CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] 
// CHECK9-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4 // CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes, i64 40, i1 false) -// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: store i64 [[TMP7]], ptr [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: store i64 [[TMP7]], ptr [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store i64 [[TMP7]], ptr [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store ptr null, ptr [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP16]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 // CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP18]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP20]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP19]], align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP22]], align 8 +// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK9-NEXT: store ptr null, ptr [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds 
[5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP24]], align 8 +// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP25]], align 8 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 +// CHECK9-NEXT: store i64 [[TMP11]], ptr [[TMP26]], align 8 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 // CHECK9-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP30]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP35]], align 8 -// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK9-NEXT: store i64 [[TMP11]], ptr [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP38]], align 8 -// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP42:%.*]] = load i32, ptr [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP42]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP43:%.*]] = load i32, ptr [[M]], align 4 -// CHECK9-NEXT: store i32 [[TMP43]], ptr [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP44]], 0 +// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP31:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP32:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: store i32 [[TMP32]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP33]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 -// CHECK9-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP45]], 0 -// CHECK9-NEXT: [[DIV7:%.*]] = 
sdiv i32 [[SUB6]], 1 -// CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] -// CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 -// CHECK9-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[TMP46:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP46]], 1 +// CHECK9-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 +// CHECK9-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP34]], 0 +// CHECK9-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 +// CHECK9-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] +// CHECK9-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 +// CHECK9-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK9-NEXT: [[TMP35:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP35]], 1 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 1, ptr [[TMP47]], align 4 -// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 5, ptr [[TMP48]], align 4 -// CHECK9-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP39]], ptr [[TMP49]], align 8 -// CHECK9-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP40]], ptr [[TMP50]], align 8 -// CHECK9-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr [[TMP41]], ptr [[TMP51]], align 8 -// CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP52]], align 8 -// CHECK9-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP53]], align 8 -// CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP54]], align 8 -// CHECK9-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 [[ADD]], ptr [[TMP55]], align 8 -// CHECK9-NEXT: [[TMP56:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP57:%.*]] = icmp ne i32 [[TMP56]], 0 -// CHECK9-NEXT: br i1 [[TMP57]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 1, ptr [[TMP36]], align 4 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 5, ptr [[TMP37]], align 4 +// CHECK9-NEXT: 
[[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr [[TMP28]], ptr [[TMP38]], align 8 +// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: store ptr [[TMP29]], ptr [[TMP39]], align 8 +// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr [[TMP30]], ptr [[TMP40]], align 8 +// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP41]], align 8 +// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP42]], align 8 +// CHECK9-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP43]], align 8 +// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 [[ADD]], ptr [[TMP44]], align 8 +// CHECK9-NEXT: [[TMP45:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP46:%.*]] = icmp ne i32 [[TMP45]], 0 +// CHECK9-NEXT: br i1 [[TMP46]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], ptr [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP58:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 -// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP58]]) +// CHECK9-NEXT: [[TMP47:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP47]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP59:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP59]]) -// CHECK9-NEXT: [[TMP60:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP60]] +// CHECK9-NEXT: [[TMP48:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP48]]) +// CHECK9-NEXT: [[TMP49:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP49]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 @@ -883,15 +883,15 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr 
[[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[M_ADDR]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[M_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[M_CASTED]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[M_CASTED]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[M_CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 5, ptr @.omp_outlined., i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]) // CHECK9-NEXT: ret void // @@ -908,18 +908,18 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[J:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I13:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[J14:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I11:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[J12:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 @@ -929,102 +929,102 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // 
CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 -// CHECK9-NEXT: [[CONV7:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK9-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP6]], 0 -// CHECK9-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1 -// CHECK9-NEXT: [[CONV10:%.*]] = sext i32 [[DIV9]] to i64 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV7]], [[CONV10]] -// CHECK9-NEXT: [[SUB11:%.*]] = sub nsw i64 [[MUL]], 1 -// CHECK9-NEXT: store i64 [[SUB11]], ptr [[DOTCAPTURE_EXPR_6]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP6]], 0 +// CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 +// CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV8]] +// CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 +// CHECK9-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_5]], align 8 // CHECK9-NEXT: store i32 0, ptr [[I]], align 4 // CHECK9-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP7]] // CHECK9-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK9: land.lhs.true: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK9-NEXT: [[CMP12:%.*]] = icmp slt i32 0, [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP12]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[CMP10:%.*]] = icmp slt i32 0, [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] // CHECK9: omp.precond.then: // CHECK9-NEXT: store i64 0, ptr [[DOTOMP_COMB_LB]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_6]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i64 [[TMP9]], ptr [[DOTOMP_COMB_UB]], align 8 // CHECK9-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1:[0-9]+]], i32 [[TMP11]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK9-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_6]], align 8 -// CHECK9-NEXT: [[CMP15:%.*]] = icmp sgt i64 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: br i1 [[CMP15]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[CMP13:%.*]] = icmp sgt i64 [[TMP12]], [[TMP13]] +// CHECK9-NEXT: br i1 [[CMP13]], label [[COND_TRUE:%.*]], 
label [[COND_FALSE:%.*]] // CHECK9: cond.true: -// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_6]], align 8 +// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i64 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ] // CHECK9-NEXT: store i64 [[COND]], ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i64 [[TMP16]], ptr [[DOTOMP_IV]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[CMP16:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]] -// CHECK9-NEXT: br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[CMP14:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]] +// CHECK9-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[N_ADDR]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: store i32 [[TMP21]], ptr [[N_CASTED]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP22:%.*]] = load i64, ptr [[N_CASTED]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[M_ADDR]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: store i32 [[TMP23]], ptr [[M_CASTED]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP24:%.*]] = load i64, ptr [[M_CASTED]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 7, ptr @.omp_outlined..1, i64 [[TMP19]], i64 [[TMP20]], i64 [[TMP22]], i64 [[TMP24]], i64 [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]), !llvm.access.group !5 +// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[N_ADDR]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: store i32 [[TMP21]], ptr [[N_CASTED]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: [[TMP22:%.*]] = load i64, ptr [[N_CASTED]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[M_ADDR]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: store i32 [[TMP23]], ptr [[M_CASTED]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: [[TMP24:%.*]] = load i64, ptr [[M_CASTED]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 7, ptr @.omp_outlined..1, i64 [[TMP19]], i64 [[TMP20]], i64 [[TMP22]], i64 [[TMP24]], i64 [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP6]] // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8, !llvm.access.group !5 +// CHECK9-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP25]], [[TMP26]] -// CHECK9-NEXT: store i64 [[ADD]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK9-NEXT: store i64 [[ADD]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 +// CHECK9-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP28]]) -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK9-NEXT: br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: -// CHECK9-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP31]], 0 +// CHECK9-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP31]], 0 +// CHECK9-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1 +// CHECK9-NEXT: [[MUL17:%.*]] = mul nsw i32 [[DIV16]], 1 +// CHECK9-NEXT: [[ADD18:%.*]] = add nsw i32 0, [[MUL17]] +// CHECK9-NEXT: store i32 [[ADD18]], ptr 
[[I11]], align 4 +// CHECK9-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP32]], 0 // CHECK9-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1 // CHECK9-NEXT: [[MUL21:%.*]] = mul nsw i32 [[DIV20]], 1 // CHECK9-NEXT: [[ADD22:%.*]] = add nsw i32 0, [[MUL21]] -// CHECK9-NEXT: store i32 [[ADD22]], ptr [[I13]], align 4 -// CHECK9-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK9-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP32]], 0 -// CHECK9-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 -// CHECK9-NEXT: [[MUL25:%.*]] = mul nsw i32 [[DIV24]], 1 -// CHECK9-NEXT: [[ADD26:%.*]] = add nsw i32 0, [[MUL25]] -// CHECK9-NEXT: store i32 [[ADD26]], ptr [[J14]], align 4 +// CHECK9-NEXT: store i32 [[ADD22]], ptr [[J12]], align 4 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK9: .omp.final.done: // CHECK9-NEXT: br label [[OMP_PRECOND_END]] @@ -1046,18 +1046,18 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[J:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I13:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[J14:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I11:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[J12:%.*]] = alloca i32, align 4 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 @@ -1067,136 +1067,136 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK9-NEXT: 
[[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 -// CHECK9-NEXT: [[CONV7:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK9-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP6]], 0 -// CHECK9-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1 -// CHECK9-NEXT: [[CONV10:%.*]] = sext i32 [[DIV9]] to i64 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV7]], [[CONV10]] -// CHECK9-NEXT: [[SUB11:%.*]] = sub nsw i64 [[MUL]], 1 -// CHECK9-NEXT: store i64 [[SUB11]], ptr [[DOTCAPTURE_EXPR_6]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP6]], 0 +// CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 +// CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV8]] +// CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 +// CHECK9-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_5]], align 8 // CHECK9-NEXT: store i32 0, ptr [[I]], align 4 // CHECK9-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP7]] // CHECK9-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK9: land.lhs.true: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK9-NEXT: [[CMP12:%.*]] = icmp slt i32 0, [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP12]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[CMP10:%.*]] = icmp slt i32 0, [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] // CHECK9: omp.precond.then: // CHECK9-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_6]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i64 [[TMP9]], ptr [[DOTOMP_UB]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i64 [[TMP10]], ptr [[DOTOMP_LB]], align 8 // CHECK9-NEXT: store i64 [[TMP11]], ptr [[DOTOMP_UB]], align 8 // CHECK9-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB2:[0-9]+]], i32 [[TMP13]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_UB]], 
align 8 -// CHECK9-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_6]], align 8 -// CHECK9-NEXT: [[CMP15:%.*]] = icmp sgt i64 [[TMP14]], [[TMP15]] -// CHECK9-NEXT: br i1 [[CMP15]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[CMP13:%.*]] = icmp sgt i64 [[TMP14]], [[TMP15]] +// CHECK9-NEXT: br i1 [[CMP13]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: -// CHECK9-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_6]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i64 [ [[TMP16]], [[COND_TRUE]] ], [ [[TMP17]], [[COND_FALSE]] ] // CHECK9-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i64 [[TMP18]], ptr [[DOTOMP_IV]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[CMP16:%.*]] = icmp sle i64 [[TMP19]], [[TMP20]] -// CHECK9-NEXT: br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[CMP14:%.*]] = icmp sle i64 [[TMP19]], [[TMP20]] +// CHECK9-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[SUB17:%.*]] = sub nsw i32 [[TMP22]], 0 -// CHECK9-NEXT: [[DIV18:%.*]] = sdiv i32 [[SUB17]], 1 -// CHECK9-NEXT: [[MUL19:%.*]] = mul nsw i32 1, [[DIV18]] -// CHECK9-NEXT: [[CONV20:%.*]] = sext i32 [[MUL19]] to i64 -// CHECK9-NEXT: [[DIV21:%.*]] = sdiv i64 [[TMP21]], [[CONV20]] -// CHECK9-NEXT: [[MUL22:%.*]] = mul nsw i64 [[DIV21]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL22]] -// CHECK9-NEXT: [[CONV23:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK9-NEXT: store i32 [[CONV23]], ptr [[I13]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP25]], 0 -// CHECK9-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 -// CHECK9-NEXT: [[MUL26:%.*]] = mul nsw i32 1, 
[[DIV25]] -// CHECK9-NEXT: [[CONV27:%.*]] = sext i32 [[MUL26]] to i64 -// CHECK9-NEXT: [[DIV28:%.*]] = sdiv i64 [[TMP24]], [[CONV27]] -// CHECK9-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP26]], 0 -// CHECK9-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK9-NEXT: [[MUL31:%.*]] = mul nsw i32 1, [[DIV30]] -// CHECK9-NEXT: [[CONV32:%.*]] = sext i32 [[MUL31]] to i64 -// CHECK9-NEXT: [[MUL33:%.*]] = mul nsw i64 [[DIV28]], [[CONV32]] -// CHECK9-NEXT: [[SUB34:%.*]] = sub nsw i64 [[TMP23]], [[MUL33]] -// CHECK9-NEXT: [[MUL35:%.*]] = mul nsw i64 [[SUB34]], 1 -// CHECK9-NEXT: [[ADD36:%.*]] = add nsw i64 0, [[MUL35]] -// CHECK9-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 -// CHECK9-NEXT: store i32 [[CONV37]], ptr [[J14]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[I13]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP22]], 0 +// CHECK9-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1 +// CHECK9-NEXT: [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]] +// CHECK9-NEXT: [[CONV18:%.*]] = sext i32 [[MUL17]] to i64 +// CHECK9-NEXT: [[DIV19:%.*]] = sdiv i64 [[TMP21]], [[CONV18]] +// CHECK9-NEXT: [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL20]] +// CHECK9-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD]] to i32 +// CHECK9-NEXT: store i32 [[CONV21]], ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB22:%.*]] = sub nsw i32 [[TMP25]], 0 +// CHECK9-NEXT: [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1 +// CHECK9-NEXT: [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]] +// CHECK9-NEXT: [[CONV25:%.*]] = sext i32 [[MUL24]] to i64 +// CHECK9-NEXT: [[DIV26:%.*]] = sdiv i64 [[TMP24]], [[CONV25]] +// CHECK9-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP26]], 0 +// CHECK9-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK9-NEXT: [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]] +// CHECK9-NEXT: [[CONV30:%.*]] = sext i32 [[MUL29]] to i64 +// CHECK9-NEXT: [[MUL31:%.*]] = mul nsw i64 [[DIV26]], [[CONV30]] +// CHECK9-NEXT: [[SUB32:%.*]] = sub nsw i64 [[TMP23]], [[MUL31]] +// CHECK9-NEXT: [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1 +// CHECK9-NEXT: [[ADD34:%.*]] = add nsw i64 0, [[MUL33]] +// CHECK9-NEXT: [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32 +// CHECK9-NEXT: store i32 [[CONV35]], ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP27]] to i64 // CHECK9-NEXT: [[TMP28:%.*]] = mul nsw i64 [[IDXPROM]], [[TMP1]] // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 
[[TMP28]] -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[J14]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[IDXPROM38:%.*]] = sext i32 [[TMP29]] to i64 -// CHECK9-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i64 [[IDXPROM38]] -// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX39]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[IDXPROM36:%.*]] = sext i32 [[TMP29]] to i64 +// CHECK9-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i64 [[IDXPROM36]] +// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX37]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[ADD40:%.*]] = add nsw i64 [[TMP30]], 1 -// CHECK9-NEXT: store i64 [[ADD40]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK9-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[ADD38:%.*]] = add nsw i64 [[TMP30]], 1 +// CHECK9-NEXT: store i64 [[ADD38]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: [[TMP31:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4 +// CHECK9-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP32]]) -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK9-NEXT: br i1 [[TMP34]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: -// CHECK9-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP35]], 0 -// CHECK9-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1 -// CHECK9-NEXT: [[MUL43:%.*]] = mul nsw i32 [[DIV42]], 1 -// CHECK9-NEXT: [[ADD44:%.*]] = add nsw i32 0, [[MUL43]] -// CHECK9-NEXT: store i32 [[ADD44]], ptr [[I13]], align 4 -// CHECK9-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK9-NEXT: [[SUB45:%.*]] = sub nsw i32 [[TMP36]], 0 -// CHECK9-NEXT: [[DIV46:%.*]] = sdiv i32 [[SUB45]], 1 -// CHECK9-NEXT: [[MUL47:%.*]] = mul nsw i32 [[DIV46]], 1 -// CHECK9-NEXT: [[ADD48:%.*]] = add nsw i32 0, [[MUL47]] -// CHECK9-NEXT: store i32 [[ADD48]], ptr [[J14]], align 4 +// CHECK9-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP35]], 0 +// CHECK9-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 +// CHECK9-NEXT: [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1 +// CHECK9-NEXT: [[ADD42:%.*]] = add nsw i32 0, [[MUL41]] +// CHECK9-NEXT: store i32 [[ADD42]], ptr [[I11]], align 4 +// CHECK9-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef 
[[NOUNDEF5]] +// CHECK9-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP36]], 0 +// CHECK9-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1 +// CHECK9-NEXT: [[MUL45:%.*]] = mul nsw i32 [[DIV44]], 1 +// CHECK9-NEXT: [[ADD46:%.*]] = add nsw i32 0, [[MUL45]] +// CHECK9-NEXT: store i32 [[ADD46]], ptr [[J12]], align 4 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK9: .omp.final.done: // CHECK9-NEXT: br label [[OMP_PRECOND_END]] @@ -1217,34 +1217,34 @@ // CHECK9-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: store ptr [[A]], ptr [[TMP0]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[A]], ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: store ptr [[A]], ptr [[TMP1]], align 8 +// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr @.offload_sizes.4, ptr [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP12]], align 8 -// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP14]], align 8 -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 20, ptr [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr 
@[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK9-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8 +// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr @.offload_sizes.4, ptr [[TMP9]], align 8 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP11]], align 8 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 20, ptr [[TMP13]], align 8 +// CHECK9-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK9-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68(ptr [[A]]) #[[ATTR3]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1286,45 +1286,45 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 19 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// 
CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 19, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !14 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..3, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group !14 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..3, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP15]] // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !14 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -1360,70 +1360,70 @@ // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 19, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 [[CONV2]], ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 19 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // 
CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 19, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK9-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 2 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP12]], 2 // CHECK9-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 2 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL5]] // CHECK9-NEXT: [[MUL6:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK9-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]] -// CHECK9-NEXT: store i32 [[ADD7]], ptr [[J]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: store i32 [[ADD7]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[TMP0]], i64 0, i64 [[IDXPROM]] -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP14]] to i64 // CHECK9-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM8]] -// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX9]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: 
omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK9-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK9-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -1468,109 +1468,109 @@ // CHECK11-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6:![0-9]+]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 // CHECK11-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR1]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[M]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[M_CASTED]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[M_CASTED]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[M_CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[TMP9:%.*]] = mul nuw i32 [[TMP8]], 4 // CHECK11-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64 // CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes, i32 40, i1 false) -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 [[TMP5]], ptr [[TMP11]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x ptr], ptr 
[[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: store i32 [[TMP5]], ptr [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 [[TMP5]], ptr [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store ptr null, ptr [[TMP13]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 // CHECK11-NEXT: store ptr null, ptr [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store ptr null, ptr [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP24]], align 4 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP18]], align 4 +// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP21]], align 4 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP23]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP24]], align 4 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 +// CHECK11-NEXT: store i64 [[TMP10]], ptr [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr 
inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 // CHECK11-NEXT: store ptr null, ptr [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP29]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr null, ptr [[TMP31]], align 4 -// CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP32]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP34]], align 4 -// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK11-NEXT: store i64 [[TMP10]], ptr [[TMP36]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr null, ptr [[TMP37]], align 4 -// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP41:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP41]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP42:%.*]] = load i32, ptr [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP42]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP43]], 0 +// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: store i32 [[TMP30]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP32]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP44]], 0 +// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP33]], 0 // CHECK11-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK11-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK11-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: 
[[TMP45:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP45]], 1 +// CHECK11-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP34]], 1 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 1, ptr [[TMP46]], align 4 -// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 5, ptr [[TMP47]], align 4 -// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK11-NEXT: store ptr [[TMP38]], ptr [[TMP48]], align 4 -// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr [[TMP39]], ptr [[TMP49]], align 4 -// CHECK11-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr [[TMP40]], ptr [[TMP50]], align 4 -// CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP51]], align 4 -// CHECK11-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP52]], align 4 -// CHECK11-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP53]], align 4 -// CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 [[ADD]], ptr [[TMP54]], align 8 -// CHECK11-NEXT: [[TMP55:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP56:%.*]] = icmp ne i32 [[TMP55]], 0 -// CHECK11-NEXT: br i1 [[TMP56]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 1, ptr [[TMP35]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 5, ptr [[TMP36]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr [[TMP27]], ptr [[TMP37]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr [[TMP28]], ptr [[TMP38]], align 4 +// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr [[TMP29]], ptr [[TMP39]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP40]], align 4 +// CHECK11-NEXT: [[TMP41:%.*]] = 
getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP41]], align 4 +// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP42]], align 4 +// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 [[ADD]], ptr [[TMP43]], align 8 +// CHECK11-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK11-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], ptr [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP57:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 -// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP57]]) +// CHECK11-NEXT: [[TMP46:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP46]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP58:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP58]]) -// CHECK11-NEXT: [[TMP59:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP59]] +// CHECK11-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK11-NEXT: [[TMP48:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP48]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 @@ -1588,15 +1588,15 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[M_ADDR]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[M_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP5]], ptr [[M_CASTED]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[M_CASTED]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[M_CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 5, ptr @.omp_outlined., i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]) // CHECK11-NEXT: ret void // @@ -1634,18 +1634,18 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP6]], 0 // CHECK11-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK11-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 @@ -1654,79 +1654,79 @@ // CHECK11-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_5]], align 8 // CHECK11-NEXT: store i32 0, ptr [[I]], align 4 // CHECK11-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP7]] // CHECK11-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK11: land.lhs.true: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP10:%.*]] = icmp slt i32 0, [[TMP8]] // CHECK11-NEXT: br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] // CHECK11: omp.precond.then: // CHECK11-NEXT: store i64 0, ptr [[DOTOMP_COMB_LB]], align 8 -// CHECK11-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i64 [[TMP9]], ptr [[DOTOMP_COMB_UB]], align 8 // CHECK11-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1:[0-9]+]], 
i32 [[TMP11]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK11-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK11-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP13:%.*]] = icmp sgt i64 [[TMP12]], [[TMP13]] // CHECK11-NEXT: br i1 [[CMP13]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: -// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 +// CHECK11-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i64 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ] // CHECK11-NEXT: store i64 [[COND]], ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK11-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8 +// CHECK11-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i64 [[TMP16]], ptr [[DOTOMP_IV]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP14:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]] // CHECK11-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !llvm.access.group [[ACC_GRP7]] // CHECK11-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32 -// CHECK11-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group [[ACC_GRP7]] // CHECK11-NEXT: [[TMP22:%.*]] = trunc i64 [[TMP21]] to i32 -// CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[N_ADDR]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: store i32 [[TMP23]], ptr [[N_CASTED]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP24:%.*]] = load i32, ptr [[N_CASTED]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP25:%.*]] = load i32, ptr [[M_ADDR]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: store i32 [[TMP25]], ptr [[M_CASTED]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP26:%.*]] = load i32, ptr [[M_CASTED]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 7, ptr @.omp_outlined..1, i32 [[TMP20]], i32 [[TMP22]], i32 [[TMP24]], i32 [[TMP26]], i32 [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]), !llvm.access.group !6 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[N_ADDR]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: store i32 [[TMP23]], ptr [[N_CASTED]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, ptr [[N_CASTED]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, ptr [[M_ADDR]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: store i32 [[TMP25]], ptr [[M_CASTED]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: [[TMP26:%.*]] = load i32, ptr [[M_CASTED]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 7, ptr @.omp_outlined..1, i32 [[TMP20]], i32 [[TMP22]], i32 [[TMP24]], i32 [[TMP26]], i32 [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]), !llvm.access.group [[ACC_GRP7]] // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP27:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP27]], [[TMP28]] -// CHECK11-NEXT: store i64 [[ADD]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK11-NEXT: store i64 [[ADD]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]]) -// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 // CHECK11-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP33]], 0 // CHECK11-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1 // CHECK11-NEXT: [[MUL17:%.*]] = mul nsw i32 [[DIV16]], 1 // CHECK11-NEXT: [[ADD18:%.*]] = add nsw i32 0, [[MUL17]] // CHECK11-NEXT: store i32 [[ADD18]], ptr [[I11]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB19:%.*]] = sub nsw i32 
[[TMP34]], 0 // CHECK11-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1 // CHECK11-NEXT: [[MUL21:%.*]] = mul nsw i32 [[DIV20]], 1 @@ -1774,18 +1774,18 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP6]], 0 // CHECK11-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK11-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 @@ -1794,52 +1794,52 @@ // CHECK11-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_5]], align 8 // CHECK11-NEXT: store i32 0, ptr [[I]], align 4 // CHECK11-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP7]] // CHECK11-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK11: land.lhs.true: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP10:%.*]] = icmp slt i32 0, [[TMP8]] // CHECK11-NEXT: br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] // CHECK11: omp.precond.then: // CHECK11-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK11-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i64 [[TMP9]], ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CONV11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CONV12:%.*]] = zext i32 
[[TMP11]] to i64 // CHECK11-NEXT: store i64 [[CONV11]], ptr [[DOTOMP_LB]], align 8 // CHECK11-NEXT: store i64 [[CONV12]], ptr [[DOTOMP_UB]], align 8 // CHECK11-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB2:[0-9]+]], i32 [[TMP13]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP15:%.*]] = icmp sgt i64 [[TMP14]], [[TMP15]] // CHECK11-NEXT: br i1 [[CMP15]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: -// CHECK11-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK11-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i64 [ [[TMP16]], [[COND_TRUE]] ], [ [[TMP17]], [[COND_FALSE]] ] // CHECK11-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK11-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i64 [[TMP18]], ptr [[DOTOMP_IV]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP16:%.*]] = icmp sle i64 [[TMP19]], [[TMP20]] // CHECK11-NEXT: br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB17:%.*]] = sub nsw i32 [[TMP22]], 0 // CHECK11-NEXT: [[DIV18:%.*]] = sdiv i32 [[SUB17]], 1 // CHECK11-NEXT: [[MUL19:%.*]] = mul nsw i32 1, [[DIV18]] @@ -1848,16 +1848,16 @@ // CHECK11-NEXT: [[MUL22:%.*]] = mul nsw i64 [[DIV21]], 1 // 
CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL22]] // CHECK11-NEXT: [[CONV23:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK11-NEXT: store i32 [[CONV23]], ptr [[I13]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: store i32 [[CONV23]], ptr [[I13]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP25]], 0 // CHECK11-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 // CHECK11-NEXT: [[MUL26:%.*]] = mul nsw i32 1, [[DIV25]] // CHECK11-NEXT: [[CONV27:%.*]] = sext i32 [[MUL26]] to i64 // CHECK11-NEXT: [[DIV28:%.*]] = sdiv i64 [[TMP24]], [[CONV27]] -// CHECK11-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP26]], 0 // CHECK11-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 // CHECK11-NEXT: [[MUL31:%.*]] = mul nsw i32 1, [[DIV30]] @@ -1867,38 +1867,38 @@ // CHECK11-NEXT: [[MUL35:%.*]] = mul nsw i64 [[SUB34]], 1 // CHECK11-NEXT: [[ADD36:%.*]] = add nsw i64 0, [[MUL35]] // CHECK11-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 -// CHECK11-NEXT: store i32 [[CONV37]], ptr [[J14]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[I13]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: store i32 [[CONV37]], ptr [[J14]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[I13]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP28:%.*]] = mul nsw i32 [[TMP27]], [[TMP1]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP28]] -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[J14]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[J14]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX38:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i32 [[TMP29]] -// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX38]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX38]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ADD39:%.*]] = add nsw i64 [[TMP30]], 1 -// CHECK11-NEXT: store i64 [[ADD39]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !10 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK11-NEXT: 
store i64 [[ADD39]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: [[TMP31:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4 +// CHECK11-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP32]]) -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK11-NEXT: br i1 [[TMP34]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: -// CHECK11-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP35]], 0 // CHECK11-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 // CHECK11-NEXT: [[MUL42:%.*]] = mul nsw i32 [[DIV41]], 1 // CHECK11-NEXT: [[ADD43:%.*]] = add nsw i32 0, [[MUL42]] // CHECK11-NEXT: store i32 [[ADD43]], ptr [[I13]], align 4 -// CHECK11-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP36]], 0 // CHECK11-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1 // CHECK11-NEXT: [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1 @@ -1924,34 +1924,34 @@ // CHECK11-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: store ptr [[A]], ptr [[TMP0]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: store ptr [[A]], ptr [[TMP2]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: store ptr [[A]], ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 1, ptr [[TMP8]], align 4 -// 
CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK11-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr @.offload_sizes.4, ptr [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK11-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 20, ptr [[TMP15]], align 8 -// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK11-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr @.offload_sizes.4, ptr [[TMP9]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP11]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 20, ptr [[TMP13]], align 8 +// CHECK11-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, ptr [[KERNEL_ARGS]]) 
+// CHECK11-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK11-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68(ptr [[A]]) #[[ATTR3]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1993,43 +1993,43 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 19 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 19, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..3, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group !15 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..3, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP16]] // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !15 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK11-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -2065,66 +2065,66 @@ // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 19, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 19 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 19, 
[[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 2 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP12]], 2 // CHECK11-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 2 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL4]] // CHECK11-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK11-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[TMP0]], i32 0, i32 [[TMP13]] -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP14]] -// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK11-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK11-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -2170,9 +2170,9 @@ // CHECK13-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 // CHECK13-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 // CHECK13-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() // CHECK13-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 @@ -2180,15 +2180,15 @@ // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 // CHECK13-NEXT: store i64 [[TMP3]], ptr [[__VLA_EXPR1]], align 8 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4 +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[M]], align 4 +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP8]], 0 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK13-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP9]], 0 // CHECK13-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK13-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 @@ -2196,29 +2196,29 @@ // CHECK13-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK13-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 // CHECK13-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK13-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK13-NEXT: [[TMP10:%.*]] = load i64, ptr 
[[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i64 [[TMP10]], ptr [[DOTOMP_UB]], align 8 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4 // CHECK13-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP11]] // CHECK13-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[SIMD_IF_END:%.*]] // CHECK13: land.lhs.true: -// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP8:%.*]] = icmp slt i32 0, [[TMP12]] // CHECK13-NEXT: br i1 [[CMP8]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END]] // CHECK13: simd.if.then: -// CHECK13-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK13-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i64 [[TMP13]], ptr [[DOTOMP_IV]], align 8 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP11:%.*]] = icmp sle i64 [[TMP14]], [[TMP15]] // CHECK13-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP17]], 0 // CHECK13-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 // CHECK13-NEXT: [[MUL14:%.*]] = mul nsw i32 1, [[DIV13]] @@ -2227,16 +2227,16 @@ // CHECK13-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL17]] // CHECK13-NEXT: [[CONV18:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK13-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef 
[[NOUNDEF2]] // CHECK13-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP20]], 0 // CHECK13-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1 // CHECK13-NEXT: [[MUL21:%.*]] = mul nsw i32 1, [[DIV20]] // CHECK13-NEXT: [[CONV22:%.*]] = sext i32 [[MUL21]] to i64 // CHECK13-NEXT: [[DIV23:%.*]] = sdiv i64 [[TMP19]], [[CONV22]] -// CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP21]], 0 // CHECK13-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 // CHECK13-NEXT: [[MUL26:%.*]] = mul nsw i32 1, [[DIV25]] @@ -2246,31 +2246,31 @@ // CHECK13-NEXT: [[MUL30:%.*]] = mul nsw i64 [[SUB29]], 1 // CHECK13-NEXT: [[ADD31:%.*]] = add nsw i64 0, [[MUL30]] // CHECK13-NEXT: [[CONV32:%.*]] = trunc i64 [[ADD31]] to i32 -// CHECK13-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64 // CHECK13-NEXT: [[TMP23:%.*]] = mul nsw i64 [[IDXPROM]], [[TMP3]] // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP23]] -// CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM33:%.*]] = sext i32 [[TMP24]] to i64 // CHECK13-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i64 [[IDXPROM33]] -// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX34]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX34]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD35:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK13-NEXT: store i64 [[ADD35]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK13-NEXT: store i64 [[ADD35]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK13: omp.inner.for.end: -// CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB36:%.*]] = sub nsw i32 [[TMP26]], 0 // CHECK13-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1 // CHECK13-NEXT: [[MUL38:%.*]] = mul nsw i32 [[DIV37]], 1 // CHECK13-NEXT: [[ADD39:%.*]] = add nsw i32 0, [[MUL38]] // CHECK13-NEXT: store i32 [[ADD39]], ptr [[I9]], align 4 -// CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // 
CHECK13-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP27]], 0 // CHECK13-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 // CHECK13-NEXT: [[MUL42:%.*]] = mul nsw i32 [[DIV41]], 1 @@ -2278,7 +2278,7 @@ // CHECK13-NEXT: store i32 [[ADD43]], ptr [[J10]], align 4 // CHECK13-NEXT: br label [[SIMD_IF_END]] // CHECK13: simd.if.end: -// CHECK13-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK13-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP28]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP29:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 @@ -2302,43 +2302,43 @@ // CHECK13-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 19, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 2 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP5]], 2 // CHECK13-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 2 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL3]] // CHECK13-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK13-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL4]] -// CHECK13-NEXT: store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM:%.*]] 
= sext i32 [[TMP6]] to i64 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM6]] -// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 10, ptr [[I]], align 4 // CHECK13-NEXT: store i32 2, ptr [[J]], align 4 @@ -2373,23 +2373,23 @@ // CHECK15-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK15-NEXT: store i32 100, ptr [[N]], align 4 // CHECK15-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF3:![0-9]+]] +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK15-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 // CHECK15-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR1]], align 4 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4 +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[M]], align 4 +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0 // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK15-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP7]], 0 // CHECK15-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // 
CHECK15-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 @@ -2397,29 +2397,29 @@ // CHECK15-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK15-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 // CHECK15-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK15-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK15-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_UB]], align 8 // CHECK15-NEXT: store i32 0, ptr [[I]], align 4 // CHECK15-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]] // CHECK15-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[SIMD_IF_END:%.*]] // CHECK15: land.lhs.true: -// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP8:%.*]] = icmp slt i32 0, [[TMP10]] // CHECK15-NEXT: br i1 [[CMP8]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END]] // CHECK15: simd.if.then: -// CHECK15-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK15-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i64 [[TMP11]], ptr [[DOTOMP_IV]], align 8 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP11:%.*]] = icmp sle i64 [[TMP12]], [[TMP13]] // CHECK15-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP15]], 0 // CHECK15-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 // CHECK15-NEXT: [[MUL14:%.*]] = mul nsw i32 1, [[DIV13]] @@ -2428,16 +2428,16 @@ // CHECK15-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL17]] // CHECK15-NEXT: [[CONV18:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK15-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP4]] 
+// CHECK15-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP18]], 0 // CHECK15-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1 // CHECK15-NEXT: [[MUL21:%.*]] = mul nsw i32 1, [[DIV20]] // CHECK15-NEXT: [[CONV22:%.*]] = sext i32 [[MUL21]] to i64 // CHECK15-NEXT: [[DIV23:%.*]] = sdiv i64 [[TMP17]], [[CONV22]] -// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP19]], 0 // CHECK15-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 // CHECK15-NEXT: [[MUL26:%.*]] = mul nsw i32 1, [[DIV25]] @@ -2447,29 +2447,29 @@ // CHECK15-NEXT: [[MUL30:%.*]] = mul nsw i64 [[SUB29]], 1 // CHECK15-NEXT: [[ADD31:%.*]] = add nsw i64 0, [[MUL30]] // CHECK15-NEXT: [[CONV32:%.*]] = trunc i64 [[ADD31]] to i32 -// CHECK15-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP21:%.*]] = mul nsw i32 [[TMP20]], [[TMP1]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP21]] -// CHECK15-NEXT: [[TMP22:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP22:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i32 [[TMP22]] -// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX33]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX33]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD34:%.*]] = add nsw i64 [[TMP23]], 1 -// CHECK15-NEXT: store i64 [[ADD34]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK15-NEXT: store i64 [[ADD34]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK15: omp.inner.for.end: -// CHECK15-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB35:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK15-NEXT: [[DIV36:%.*]] = sdiv i32 [[SUB35]], 1 // CHECK15-NEXT: [[MUL37:%.*]] = mul nsw i32 [[DIV36]], 1 // CHECK15-NEXT: [[ADD38:%.*]] = add nsw i32 0, [[MUL37]] // 
CHECK15-NEXT: store i32 [[ADD38]], ptr [[I9]], align 4 -// CHECK15-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK15-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP25]], 0 // CHECK15-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 // CHECK15-NEXT: [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1 @@ -2477,7 +2477,7 @@ // CHECK15-NEXT: store i32 [[ADD42]], ptr [[J10]], align 4 // CHECK15-NEXT: br label [[SIMD_IF_END]] // CHECK15: simd.if.end: -// CHECK15-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK15-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP26]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP27:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 @@ -2501,41 +2501,41 @@ // CHECK15-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 19, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 2 // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP5]], 2 // CHECK15-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 2 // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL3]] // CHECK15-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK15-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL4]] -// CHECK15-NEXT: store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: 
store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[A]], i32 0, i32 [[TMP6]] -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP7]] -// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX6]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK15-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 10, ptr [[I]], align 4 // CHECK15-NEXT: store i32 2, ptr [[J]], align 4 diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_if_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_if_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_if_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_if_codegen.cpp @@ -117,9 +117,9 @@ // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr @Arg, align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF9:![0-9]+]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[ARG_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -189,9 +189,9 @@ // CHECK1-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[ARG]], ptr [[ARG_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[ARG_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined., i64 [[TMP1]]) // CHECK1-NEXT: ret void // @@ -218,48 +218,48 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK1-NEXT: store i32 [[TMP11]], ptr [[ARG_CASTED]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: store i32 [[TMP11]], ptr [[ARG_CASTED]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP10]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -291,55 +291,55 @@ // CHECK1-NEXT: store i64 [[ARG]], ptr [[ARG_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// 
CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK1-NEXT: store i32 0, ptr [[ARG_ADDR]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK1-NEXT: store i32 0, ptr [[ARG_ADDR]], align 4, !llvm.access.group [[ACC_GRP14]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], 
align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -376,49 +376,49 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP18]] -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP18]] -// CHECK1-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK1-NEXT: call void @.omp_outlined..3(ptr [[TMP11]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 
[[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP18]] -// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP18]] +// CHECK1-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP19]] +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK1-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK1-NEXT: call void @.omp_outlined..3(ptr [[TMP11]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP19]] +// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP19]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -448,55 +448,55 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 
[[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK1-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP21]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP22]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr 
[[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -545,16 +545,16 @@ // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK1: omp_offload.cont: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l85() #[[ATTR2]] -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr @Arg, align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP12]] to i1 // CHECK1-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL1]] to i8 // CHECK1-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP14]] to i1 // CHECK1-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -566,7 +566,7 @@ // CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP20]] to i1 // CHECK1-NEXT: [[TMP21:%.*]] = select i1 [[TOBOOL4]], i32 0, i32 1 // CHECK1-NEXT: [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 @@ -600,7 +600,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92(i64 [[TMP13]]) #[[ATTR2]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr @Arg, align 4 +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP33]]) // CHECK1-NEXT: ret i32 [[CALL]] // @@ -631,45 +631,45 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = 
load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..5, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP24]] +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..5, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP25]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -699,55 +699,55 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // 
CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK1-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP27]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK1-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP28]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -784,49 +784,49 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef 
[[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP31]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP31]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP30]] -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP30]] -// CHECK1-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK1-NEXT: call void @.omp_outlined..7(ptr [[TMP11]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP30]] -// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP30]] +// CHECK1-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP31]] +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP31]] +// CHECK1-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK1-NEXT: call void @.omp_outlined..7(ptr [[TMP11]], ptr 
[[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP31]] +// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP31]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -856,55 +856,55 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// 
CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK1-NEXT: call void @_Z3fn5v(), !llvm.access.group [[ACC_GRP33]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK1-NEXT: call void @_Z3fn5v(), !llvm.access.group [[ACC_GRP34]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -920,11 +920,11 @@ // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = 
load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK1-NEXT: [[TMP0:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP0]] to i1 // CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined..8, i64 [[TMP1]]) // CHECK1-NEXT: ret void // @@ -951,58 +951,58 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK1-NEXT: 
[[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP36]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP11]] to i1 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..9, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP36]] +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..9, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP37]] // CHECK1-NEXT: br label [[OMP_IF_END:%.*]] // CHECK1: omp_if.else: -// CHECK1-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP36]] -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP36]] -// CHECK1-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK1-NEXT: call void @.omp_outlined..9(ptr [[TMP12]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP36]] -// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP36]] +// CHECK1-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP37]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP37]] +// CHECK1-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK1-NEXT: call void @.omp_outlined..9(ptr [[TMP12]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP37]] +// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP37]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 
[[TMP15]], 0 // CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1032,55 +1032,55 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = 
add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]] -// CHECK1-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP39]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK1-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP40]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1129,15 +1129,15 @@ // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK1: omp_offload.cont: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l66() #[[ATTR2]] -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK1-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP12]] to i1 // CHECK1-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL1]] to i8 // CHECK1-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP13]], ptr [[TMP14]], align 8 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -1146,7 +1146,7 @@ // CHECK1-NEXT: store ptr null, ptr [[TMP16]], align 8 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // 
CHECK1-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP19]] to i1 // CHECK1-NEXT: [[TMP20:%.*]] = select i1 [[TOBOOL3]], i32 0, i32 1 // CHECK1-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 @@ -1204,45 +1204,45 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP43]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP43]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..13, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP42]] +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..13, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP43]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1272,55 +1272,55 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // 
CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP45]] -// CHECK1-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP45]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK1-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP46]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1357,49 +1357,49 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef 
[[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP48:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP49]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP49]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: call void @.omp_outlined..15(ptr [[TMP11]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP48]] +// CHECK1-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP49]] +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP49]] +// CHECK1-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP49]] +// CHECK1-NEXT: call void @.omp_outlined..15(ptr [[TMP11]], ptr 
[[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP49]] +// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP49]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP48]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP49:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1429,55 +1429,55 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// 
CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP51:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP51]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP52]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP51]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP51]] -// CHECK1-NEXT: call void @_Z3fn2v(), !llvm.access.group [[ACC_GRP51]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP52]] +// CHECK1-NEXT: call void @_Z3fn2v(), !llvm.access.group [[ACC_GRP52]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP51]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP51]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP52:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1493,11 +1493,11 @@ // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] 
= load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK1-NEXT: [[TMP0:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP0]] to i1 // CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined..16, i64 [[TMP1]]) // CHECK1-NEXT: ret void // @@ -1524,58 +1524,58 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP54:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP55]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP55]] // CHECK1-NEXT: 
[[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP54]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP11]] to i1 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..17, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP54]] +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..17, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP55]] // CHECK1-NEXT: br label [[OMP_IF_END:%.*]] // CHECK1: omp_if.else: -// CHECK1-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP54]] -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP54]] -// CHECK1-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP54]] -// CHECK1-NEXT: call void @.omp_outlined..17(ptr [[TMP12]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP54]] -// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP54]] +// CHECK1-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP55]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP55]] +// CHECK1-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP55]] +// CHECK1-NEXT: call void @.omp_outlined..17(ptr [[TMP12]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP55]] +// CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP55]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP54]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP54]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP55:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP56:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP16:%.*]] = icmp ne i32 
[[TMP15]], 0 // CHECK1-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1605,55 +1605,55 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57:![0-9]+]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP57]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP58]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = 
add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP57]] -// CHECK1-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP57]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP58]] +// CHECK1-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP58]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]], !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP58:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP59:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1679,9 +1679,9 @@ // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr @Arg, align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF9:![0-9]+]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[ARG_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 // CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -1751,9 +1751,9 @@ // CHECK3-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 // CHECK3-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 // CHECK3-NEXT: store i64 [[ARG]], ptr [[ARG_ADDR]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[ARG_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined., i64 [[TMP1]]) // CHECK3-NEXT: ret void // @@ -1780,48 +1780,48 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !nontemporal !10, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: store i32 [[TMP11]], ptr [[ARG_CASTED]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP9]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !nontemporal !11, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: store i32 [[TMP11]], ptr [[ARG_CASTED]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK3-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -1853,55 +1853,55 @@ // CHECK3-NEXT: store i64 [[ARG]], ptr [[ARG_ADDR]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, 
i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]] -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP14]] -// CHECK3-NEXT: store i32 0, ptr [[ARG_ADDR]], align 4, !nontemporal !10, !llvm.access.group [[ACC_GRP14]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK3-NEXT: store i32 0, ptr [[ARG_ADDR]], align 4, !nontemporal !11, !llvm.access.group [[ACC_GRP15]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: 
[[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -1938,49 +1938,49 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK3-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: call void @.omp_outlined..3(ptr [[TMP11]], 
ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP19]] +// CHECK3-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP20]] +// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP20]] +// CHECK3-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK3-NEXT: call void @.omp_outlined..3(ptr [[TMP11]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP20]] +// CHECK3-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP20]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK3-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2010,55 +2010,55 @@ // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call 
void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]] -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK3-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP22]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP23]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 
[[TMP3]]) -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2107,16 +2107,16 @@ // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK3: omp_offload.cont: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l85() #[[ATTR2]] -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr @Arg, align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK3-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP12]] to i1 // CHECK3-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL1]] to i8 // CHECK3-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK3-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP14]] to i1 // CHECK3-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -2128,7 +2128,7 @@ // CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 8 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP20:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP20:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP20]] to i1 // CHECK3-NEXT: [[TMP21:%.*]] = select i1 [[TOBOOL4]], i32 0, i32 1 // CHECK3-NEXT: [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 @@ -2162,7 +2162,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92(i64 [[TMP13]]) #[[ATTR2]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr @Arg, align 4 +// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP33]]) // CHECK3-NEXT: ret i32 [[CALL]] // @@ -2193,45 +2193,45 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load 
i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]] -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..5, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP25]] +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..5, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP26]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK3-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2261,55 +2261,55 @@ // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // 
CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]] -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK3-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP28]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK3-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP29]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2346,25 +2346,25 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef 
[[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: @@ -2379,16 +2379,16 @@ // CHECK3-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]) // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK3-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2418,38 +2418,38 @@ // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr 
[[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -2458,15 +2458,15 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label 
[[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2482,11 +2482,11 @@ // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK3-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK3-NEXT: [[TMP0:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP0]] to i1 // CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined..8, i64 [[TMP1]]) // CHECK3-NEXT: ret void // @@ -2516,69 +2516,69 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK3-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP5]] to i1 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE5:%.*]] // CHECK3: omp_if.then: // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, 
!llvm.access.group [[ACC_GRP34:![0-9]+]] -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK3-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK3-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK3-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP34]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP12]] to i1 // CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL2]] to i8 -// CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1, !llvm.access.group [[ACC_GRP34]] -// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP34]] -// CHECK3-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP34]] +// CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1, !llvm.access.group [[ACC_GRP35]] +// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP14]] to i1 // CHECK3-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN4:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then4: -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..9, i64 [[TMP9]], i64 [[TMP11]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP34]] +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..9, i64 [[TMP9]], i64 [[TMP11]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP35]] // CHECK3-NEXT: br label [[OMP_IF_END:%.*]] // CHECK3: omp_if.else: -// CHECK3-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP34]] -// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP34]] -// CHECK3-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK3-NEXT: call void @.omp_outlined..9(ptr [[TMP15]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]], i64 [[TMP13]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP34]] -// CHECK3-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP34]] +// CHECK3-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP35]] +// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK3-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK3-NEXT: call void @.omp_outlined..9(ptr [[TMP15]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]], i64 [[TMP13]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP35]] +// CHECK3-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP35]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_IF_END20:%.*]] // CHECK3: omp_if.else5: // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND6:%.*]] // CHECK3: omp.inner.for.cond6: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]] // CHECK3-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY8:%.*]], label [[OMP_INNER_FOR_END19:%.*]] // CHECK3: omp.inner.for.body8: @@ -2586,12 +2586,12 @@ // CHECK3-NEXT: [[TMP21:%.*]] = zext i32 [[TMP20]] to i64 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK3-NEXT: [[TMP23:%.*]] = zext i32 [[TMP22]] to i64 -// CHECK3-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK3-NEXT: [[TMP24:%.*]] = 
load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL9:%.*]] = trunc i8 [[TMP24]] to i1 // CHECK3-NEXT: [[FROMBOOL11:%.*]] = zext i1 [[TOBOOL9]] to i8 // CHECK3-NEXT: store i8 [[FROMBOOL11]], ptr [[DOTCAPTURE_EXPR__CASTED10]], align 1 -// CHECK3-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED10]], align 8 -// CHECK3-NEXT: [[TMP26:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK3-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED10]], align 8, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP26:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL12:%.*]] = trunc i8 [[TMP26]] to i1 // CHECK3-NEXT: br i1 [[TOBOOL12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE14:%.*]] // CHECK3: omp_if.then13: @@ -2607,18 +2607,18 @@ // CHECK3: omp_if.end16: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK3: omp.inner.for.inc17: -// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK3-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND6]], !llvm.loop [[LOOP37:![0-9]+]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND6]], !llvm.loop [[LOOP38:![0-9]+]] // CHECK3: omp.inner.for.end19: // CHECK3-NEXT: br label [[OMP_IF_END20]] // CHECK3: omp_if.end20: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0 // CHECK3-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2650,81 +2650,81 @@ // CHECK3-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr 
[[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]] -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP38]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK3-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP38]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP39]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP40:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_IF_END:%.*]] // CHECK3: omp_if.else: // CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP13]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP14]], 99 // CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]] // CHECK3: cond.true5: // CHECK3-NEXT: br label [[COND_END7:%.*]] // CHECK3: cond.false6: -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END7]] // CHECK3: cond.end7: // CHECK3-NEXT: [[COND8:%.*]] = phi i32 [ 99, [[COND_TRUE5]] ], [ [[TMP15]], [[COND_FALSE6]] ] // CHECK3-NEXT: store i32 [[COND8]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK3: omp.inner.for.cond9: -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] // CHECK3-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END17:%.*]] // CHECK3: omp.inner.for.body11: -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP19]], 1 // CHECK3-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]] // CHECK3-NEXT: store i32 [[ADD13]], ptr [[I]], align 4 @@ -2733,19 +2733,19 @@ // CHECK3: omp.body.continue14: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]] // CHECK3: omp.inner.for.inc15: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP20]], 1 // CHECK3-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP41:![0-9]+]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP42:![0-9]+]] // CHECK3: omp.inner.for.end17: // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 
[[TMP22]]) -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 // CHECK3-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2777,81 +2777,81 @@ // CHECK3-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK3-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42:![0-9]+]] -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr 
[[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP42]] -// CHECK3-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP43]] +// CHECK3-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP43]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_IF_END:%.*]] // CHECK3: omp_if.else: // CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP13]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP14]], 99 // CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]] // CHECK3: cond.true5: // CHECK3-NEXT: br label [[COND_END7:%.*]] // CHECK3: cond.false6: -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END7]] // CHECK3: cond.end7: // CHECK3-NEXT: [[COND8:%.*]] = phi i32 [ 99, [[COND_TRUE5]] ], [ [[TMP15]], [[COND_FALSE6]] ] // CHECK3-NEXT: store i32 [[COND8]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK3: omp.inner.for.cond9: -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], 
align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] // CHECK3-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END17:%.*]] // CHECK3: omp.inner.for.body11: -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP19]], 1 // CHECK3-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]] // CHECK3-NEXT: store i32 [[ADD13]], ptr [[I]], align 4 @@ -2860,19 +2860,19 @@ // CHECK3: omp.body.continue14: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]] // CHECK3: omp.inner.for.inc15: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP20]], 1 // CHECK3-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP45:![0-9]+]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP46:![0-9]+]] // CHECK3: omp.inner.for.end17: // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]]) -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 // CHECK3-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2921,15 +2921,15 @@ // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK3: omp_offload.cont: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l66() #[[ATTR2]] -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK3-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP12]] to i1 // CHECK3-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL1]] to i8 // CHECK3-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i64 [[TMP13]], ptr [[TMP14]], align 8 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -2938,7 +2938,7 @@ // CHECK3-NEXT: store ptr null, ptr [[TMP16]], align 8 
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP19:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK3-NEXT: [[TMP19:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP19]] to i1 // CHECK3-NEXT: [[TMP20:%.*]] = select i1 [[TOBOOL3]], i32 0, i32 1 // CHECK3-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 @@ -2996,45 +2996,45 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46:![0-9]+]] -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP47]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP47]] // CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP47]] // CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..14, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP46]] +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..14, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP47]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]] -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP47]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK3-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -3064,55 +3064,55 @@ // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: 
cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49:![0-9]+]] -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP49]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP50]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK3-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP49]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP50]] +// CHECK3-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP50]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -3149,25 +3149,25 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr 
[[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: @@ -3182,16 +3182,16 @@ // CHECK3-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]) // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP52:![0-9]+]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK3-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -3221,38 +3221,38 @@ // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 99, 
ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -3261,15 +3261,15 @@ // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP53:![0-9]+]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -3285,11 +3285,11 @@ // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK3-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK3-NEXT: [[TMP0:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK3-NEXT: [[TMP0:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP0]] to i1 // CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined..17, i64 [[TMP1]]) // CHECK3-NEXT: ret void // @@ -3316,58 +3316,58 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP54:![0-9]+]] -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP55]] // CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP55]] // CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK3-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP54]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP11]] to i1 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..18, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP54]] +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..18, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP55]] // CHECK3-NEXT: br label [[OMP_IF_END:%.*]] // CHECK3: omp_if.else: -// CHECK3-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP54]] -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP54]] -// CHECK3-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP54]] -// CHECK3-NEXT: call void @.omp_outlined..18(ptr [[TMP12]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP54]] -// CHECK3-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP54]] +// CHECK3-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP55]] +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP55]] +// CHECK3-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP55]] +// CHECK3-NEXT: call void @.omp_outlined..18(ptr [[TMP12]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP55]] +// CHECK3-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP55]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP54]] -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP14:%.*]] = 
load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP54]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP55:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP56:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK3-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -3397,55 +3397,55 @@ // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK3-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], 
align 4, !llvm.access.group [[ACC_GRP57:![0-9]+]] -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP57]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP58]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP57]] -// CHECK3-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP57]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP58]] +// CHECK3-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP58]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]], !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP58:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP59:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -3477,54 +3477,54 @@ // CHECK5-NEXT: [[I6:%.*]] = alloca i32, align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group 
[[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK5-NEXT: store i32 0, ptr @Arg, align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: store i32 0, ptr @Arg, align 4, !llvm.access.group [[ACC_GRP3]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK5-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 100, ptr [[I]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK5: omp.inner.for.cond7: -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]] -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK5-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK5: omp.inner.for.body9: -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK5-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK5-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK5-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP6]] +// CHECK5-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK5-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP7]] // 
CHECK5-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] // CHECK5: omp.body.continue12: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK5: omp.inner.for.inc13: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK5: omp.inner.for.end15: // CHECK5-NEXT: store i32 100, ptr [[I6]], align 4 // CHECK5-NEXT: ret void @@ -3553,87 +3553,87 @@ // CHECK5-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK5-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP10]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK5-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK5: 
omp.inner.for.end: // CHECK5-NEXT: store i32 100, ptr [[I]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK5: omp.inner.for.cond7: -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK5-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK5: omp.inner.for.body9: -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK5-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK5-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK5-NEXT: call void @_Z3fn5v(), !llvm.access.group [[ACC_GRP12]] +// CHECK5-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK5-NEXT: call void @_Z3fn5v(), !llvm.access.group [[ACC_GRP13]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] // CHECK5: omp.body.continue12: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK5: omp.inner.for.inc13: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK5: omp.inner.for.end15: // CHECK5-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr @Arg, align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK5-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK5-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB17]], align 4 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB18]], align 4 -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV19]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] // CHECK5: omp.inner.for.cond21: -// CHECK5-NEXT: [[TMP12:%.*]] = load 
i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]] // CHECK5-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] // CHECK5: omp.inner.for.body23: -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP14]], 1 // CHECK5-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] -// CHECK5-NEXT: store i32 [[ADD25]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK5-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP15]] +// CHECK5-NEXT: store i32 [[ADD25]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK5-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP16]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] // CHECK5: omp.body.continue26: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] // CHECK5: omp.inner.for.inc27: -// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK5-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK5: omp.inner.for.end29: // CHECK5-NEXT: store i32 100, ptr [[I20]], align 4 -// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr @Arg, align 4 +// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP16]]) // CHECK5-NEXT: ret i32 [[CALL]] // @@ -3661,84 +3661,84 @@ // CHECK5-NEXT: store i32 [[ARG]], ptr [[ARG_ADDR]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // 
CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK5-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP18]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK5-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP19]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK5-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 100, ptr [[I]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK5: omp.inner.for.cond7: -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]] -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK5-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK5: omp.inner.for.body9: -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK5-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK5-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK5-NEXT: call void @_Z3fn2v(), !llvm.access.group [[ACC_GRP21]] +// CHECK5-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK5-NEXT: call void @_Z3fn2v(), !llvm.access.group [[ACC_GRP22]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] // CHECK5: omp.body.continue12: // CHECK5-NEXT: br label 
[[OMP_INNER_FOR_INC13:%.*]] // CHECK5: omp.inner.for.inc13: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK5: omp.inner.for.end15: // CHECK5-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK5-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK5-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB17]], align 4 // CHECK5-NEXT: store i32 99, ptr [[DOTOMP_UB18]], align 4 -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV19]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] // CHECK5: omp.inner.for.cond21: -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]] -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]] // CHECK5-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] // CHECK5: omp.inner.for.body23: -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP14]], 1 // CHECK5-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] -// CHECK5-NEXT: store i32 [[ADD25]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK5-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP24]] +// CHECK5-NEXT: store i32 [[ADD25]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK5-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP25]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] // CHECK5: omp.body.continue26: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] // CHECK5: omp.inner.for.inc27: -// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK5-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK5-NEXT: br label 
[[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK5: omp.inner.for.end29: // CHECK5-NEXT: store i32 100, ptr [[I20]], align 4 // CHECK5-NEXT: ret i32 0 @@ -3759,54 +3759,54 @@ // CHECK7-NEXT: [[I6:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK7-NEXT: store i32 0, ptr @Arg, align 4, !nontemporal !3, !llvm.access.group [[ACC_GRP2]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK7-NEXT: store i32 0, ptr @Arg, align 4, !nontemporal !4, !llvm.access.group [[ACC_GRP3]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK7-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 100, ptr [[I]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK7-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK7: omp.inner.for.cond7: -// CHECK7-NEXT: [[TMP6:%.*]] = 
load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]] -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK7-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK7: omp.inner.for.body9: -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK7-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK7-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK7-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP7]] +// CHECK7-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK7-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP8]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] // CHECK7: omp.body.continue12: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK7: omp.inner.for.inc13: -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK7-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK7: omp.inner.for.end15: // CHECK7-NEXT: store i32 100, ptr [[I6]], align 4 // CHECK7-NEXT: ret void @@ -3835,42 +3835,42 @@ // CHECK7-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]] -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK7-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP10]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK7-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP11]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK7-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 100, ptr [[I]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK7-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK7: omp.inner.for.cond7: -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK7-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK7: omp.inner.for.body9: -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK7-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] // CHECK7-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4 @@ -3879,55 +3879,55 @@ // CHECK7: omp.body.continue12: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK7: omp.inner.for.inc13: -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK7-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK7: omp.inner.for.end15: // CHECK7-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr @Arg, align 4 +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP10]], 0 // 
CHECK7-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK7-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB17]], align 4 // CHECK7-NEXT: store i32 99, ptr [[DOTOMP_UB18]], align 4 -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4 +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV19]], align 4 -// CHECK7-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK7-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[TOBOOL21:%.*]] = trunc i8 [[TMP12]] to i1 // CHECK7-NEXT: br i1 [[TOBOOL21]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND22:%.*]] // CHECK7: omp.inner.for.cond22: -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP23:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK7-NEXT: br i1 [[CMP23]], label [[OMP_INNER_FOR_BODY24:%.*]], label [[OMP_INNER_FOR_END30:%.*]] // CHECK7: omp.inner.for.body24: -// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[MUL25:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK7-NEXT: [[ADD26:%.*]] = add nsw i32 0, [[MUL25]] -// CHECK7-NEXT: store i32 [[ADD26]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK7-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP15]] +// CHECK7-NEXT: store i32 [[ADD26]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK7-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP16]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE27:%.*]] // CHECK7: omp.body.continue27: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC28:%.*]] // CHECK7: omp.inner.for.inc28: -// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK7-NEXT: store i32 [[ADD29]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND22]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD29]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND22]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK7: omp.inner.for.end30: // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] // CHECK7: omp_if.else: // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND31:%.*]] // CHECK7: omp.inner.for.cond31: -// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4 -// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4 +// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: 
[[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP32:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] // CHECK7-NEXT: br i1 [[CMP32]], label [[OMP_INNER_FOR_BODY33:%.*]], label [[OMP_INNER_FOR_END39:%.*]] // CHECK7: omp.inner.for.body33: -// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4 +// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[MUL34:%.*]] = mul nsw i32 [[TMP19]], 1 // CHECK7-NEXT: [[ADD35:%.*]] = add nsw i32 0, [[MUL34]] // CHECK7-NEXT: store i32 [[ADD35]], ptr [[I20]], align 4 @@ -3936,15 +3936,15 @@ // CHECK7: omp.body.continue36: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC37:%.*]] // CHECK7: omp.inner.for.inc37: -// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4 +// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD38:%.*]] = add nsw i32 [[TMP20]], 1 // CHECK7-NEXT: store i32 [[ADD38]], ptr [[DOTOMP_IV19]], align 4 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND31]], !llvm.loop [[LOOP18:![0-9]+]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND31]], !llvm.loop [[LOOP19:![0-9]+]] // CHECK7: omp.inner.for.end39: // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: // CHECK7-NEXT: store i32 100, ptr [[I20]], align 4 -// CHECK7-NEXT: [[TMP21:%.*]] = load i32, ptr @Arg, align 4 +// CHECK7-NEXT: [[TMP21:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP21]]) // CHECK7-NEXT: ret i32 [[CALL]] // @@ -3972,42 +3972,42 @@ // CHECK7-NEXT: store i32 [[ARG]], ptr [[ARG_ADDR]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK7-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP19]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK7-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP20]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: 
br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK7-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 100, ptr [[I]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK7-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK7: omp.inner.for.cond7: -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK7-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK7: omp.inner.for.body9: -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK7-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] // CHECK7-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4 @@ -4016,40 +4016,40 @@ // CHECK7: omp.body.continue12: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK7: omp.inner.for.inc13: -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK7-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK7: omp.inner.for.end15: // CHECK7-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK7-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK7-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB17]], align 4 // CHECK7-NEXT: store i32 99, ptr [[DOTOMP_UB18]], align 4 -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4 +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV19]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] // CHECK7: omp.inner.for.cond21: -// CHECK7-NEXT: 
[[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]] -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]] // CHECK7-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] // CHECK7: omp.inner.for.body23: -// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP14]], 1 // CHECK7-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] -// CHECK7-NEXT: store i32 [[ADD25]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP23]] -// CHECK7-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP23]] +// CHECK7-NEXT: store i32 [[ADD25]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK7-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP24]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] // CHECK7: omp.body.continue26: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] // CHECK7: omp.inner.for.inc27: -// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK7-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP23]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP24:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP25:![0-9]+]] // CHECK7: omp.inner.for.end29: // CHECK7-NEXT: store i32 100, ptr [[I20]], align 4 // CHECK7-NEXT: ret i32 0 @@ -4064,9 +4064,9 @@ // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr @Arg, align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF9:![0-9]+]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[ARG_CASTED]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 // CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -4136,9 +4136,9 @@ // CHECK9-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i64 [[ARG]], ptr [[ARG_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[ARG_CASTED]], align 4 -// 
CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined., i64 [[TMP1]]) // CHECK9-NEXT: ret void // @@ -4165,48 +4165,48 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK9-NEXT: store i32 [[TMP11]], ptr [[ARG_CASTED]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK9-NEXT: call void (ptr, i32, 
ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: store i32 [[TMP11]], ptr [[ARG_CASTED]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP10]] // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK9-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -4238,55 +4238,55 @@ // CHECK9-NEXT: store i64 [[ARG]], ptr [[ARG_ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// 
CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK9-NEXT: store i32 0, ptr [[ARG_ADDR]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK9-NEXT: store i32 0, ptr [[ARG_ADDR]], align 4, !llvm.access.group [[ACC_GRP14]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], 
align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -4323,49 +4323,49 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP18]] -// CHECK9-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP18]] -// CHECK9-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK9-NEXT: call void @.omp_outlined..3(ptr [[TMP11]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 
[[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP18]] -// CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP19]] +// CHECK9-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK9-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK9-NEXT: call void @.omp_outlined..3(ptr [[TMP11]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP19]] +// CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP19]] // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -4395,55 +4395,55 @@ // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], 
i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK9-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK9-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP22]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, 
ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -4492,16 +4492,16 @@ // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l85() #[[ATTR2]] -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr @Arg, align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK9-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK9-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK9-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK9-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP12]] to i1 // CHECK9-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL1]] to i8 // CHECK9-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK9-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK9-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP14]] to i1 // CHECK9-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK9: omp_if.then: @@ -4513,7 +4513,7 @@ // CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 // CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP20:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK9-NEXT: [[TMP20:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP20]] to i1 // CHECK9-NEXT: [[TMP21:%.*]] = select i1 [[TOBOOL4]], i32 0, i32 1 // CHECK9-NEXT: [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 @@ -4547,7 +4547,7 @@ // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92(i64 [[TMP13]]) #[[ATTR2]] // CHECK9-NEXT: br label [[OMP_IF_END]] // CHECK9: omp_if.end: -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr @Arg, align 4 +// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP33]]) // CHECK9-NEXT: ret i32 [[CALL]] // @@ -4578,45 +4578,45 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: 
[[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]] -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..5, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP24]] +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..5, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP25]] // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -4646,55 +4646,55 @@ // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // 
CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK9-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP27]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK9-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP28]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -4731,49 +4731,49 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef 
[[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]] -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP31]] // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP31]] // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP30]] -// CHECK9-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP30]] -// CHECK9-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK9-NEXT: call void @.omp_outlined..7(ptr [[TMP11]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP30]] -// CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP30]] +// CHECK9-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP31]] +// CHECK9-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP31]] +// CHECK9-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK9-NEXT: call void @.omp_outlined..7(ptr [[TMP11]], ptr 
[[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP31]] +// CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP31]] // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -4803,55 +4803,55 @@ // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// 
CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK9-NEXT: call void @_Z3fn5v(), !llvm.access.group [[ACC_GRP33]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK9-NEXT: call void @_Z3fn5v(), !llvm.access.group [[ACC_GRP34]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -4867,11 +4867,11 @@ // CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] 
= load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK9-NEXT: [[TMP0:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP0]] to i1 // CHECK9-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK9-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined..8, i64 [[TMP1]]) // CHECK9-NEXT: ret void // @@ -4898,58 +4898,58 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]] -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK9-NEXT: 
[[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP36]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP11]] to i1 // CHECK9-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK9: omp_if.then: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..9, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP36]] +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..9, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP37]] // CHECK9-NEXT: br label [[OMP_IF_END:%.*]] // CHECK9: omp_if.else: -// CHECK9-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP36]] -// CHECK9-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP36]] -// CHECK9-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK9-NEXT: call void @.omp_outlined..9(ptr [[TMP12]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP36]] -// CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP36]] +// CHECK9-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP37]] +// CHECK9-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP37]] +// CHECK9-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK9-NEXT: call void @.omp_outlined..9(ptr [[TMP12]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP37]] +// CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP37]] // CHECK9-NEXT: br label [[OMP_IF_END]] // CHECK9: omp_if.end: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP16:%.*]] = icmp ne i32 
[[TMP15]], 0 // CHECK9-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -4979,55 +4979,55 @@ // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK9-NEXT: [[ADD:%.*]] = 
add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]] -// CHECK9-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP39]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK9-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP40]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -5076,15 +5076,15 @@ // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l66() #[[ATTR2]] -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK9-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK9-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK9-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK9-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP12]] to i1 // CHECK9-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL1]] to i8 // CHECK9-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK9-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: store i64 [[TMP13]], ptr [[TMP14]], align 8 // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -5093,7 +5093,7 @@ // CHECK9-NEXT: store ptr null, ptr [[TMP16]], align 8 // CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP19:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK9-NEXT: [[TMP19:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // 
CHECK9-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP19]] to i1 // CHECK9-NEXT: [[TMP20:%.*]] = select i1 [[TOBOOL3]], i32 0, i32 1 // CHECK9-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 @@ -5151,45 +5151,45 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42:![0-9]+]] -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP43]] // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP43]] // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..13, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP42]] +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..13, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP43]] // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]] -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -5219,55 +5219,55 @@ // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // 
CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP45]] -// CHECK9-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP45]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK9-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP46]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -5304,49 +5304,49 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef 
[[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP48:![0-9]+]] -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP49]] // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP49]] // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP48]] -// CHECK9-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP48]] -// CHECK9-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP48]] -// CHECK9-NEXT: call void @.omp_outlined..15(ptr [[TMP11]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP48]] -// CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP48]] +// CHECK9-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP49]] +// CHECK9-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP49]] +// CHECK9-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP49]] +// CHECK9-NEXT: call void @.omp_outlined..15(ptr [[TMP11]], ptr 
[[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP49]] +// CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP49]] // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP48]] -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP48]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP49:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -5376,55 +5376,55 @@ // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// 
CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP51:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP51]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP52]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP51]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP51]] -// CHECK9-NEXT: call void @_Z3fn2v(), !llvm.access.group [[ACC_GRP51]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP52]] +// CHECK9-NEXT: call void @_Z3fn2v(), !llvm.access.group [[ACC_GRP52]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP51]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP51]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP52:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP52]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -5440,11 +5440,11 @@ // CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] 
= load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK9-NEXT: [[TMP0:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP0]] to i1 // CHECK9-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK9-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined..16, i64 [[TMP1]]) // CHECK9-NEXT: ret void // @@ -5471,58 +5471,58 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP54:![0-9]+]] -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP55]] // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP55]] // CHECK9-NEXT: 
[[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK9-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP54]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP11]] to i1 // CHECK9-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK9: omp_if.then: -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..17, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP54]] +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..17, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP55]] // CHECK9-NEXT: br label [[OMP_IF_END:%.*]] // CHECK9: omp_if.else: -// CHECK9-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP54]] -// CHECK9-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP54]] -// CHECK9-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP54]] -// CHECK9-NEXT: call void @.omp_outlined..17(ptr [[TMP12]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP54]] -// CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP54]] +// CHECK9-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP55]] +// CHECK9-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP55]] +// CHECK9-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP55]] +// CHECK9-NEXT: call void @.omp_outlined..17(ptr [[TMP12]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP55]] +// CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP55]] // CHECK9-NEXT: br label [[OMP_IF_END]] // CHECK9: omp_if.end: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP54]] -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP54]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP55:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP56:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP16:%.*]] = icmp ne i32 
[[TMP15]], 0 // CHECK9-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -5552,55 +5552,55 @@ // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57:![0-9]+]] -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP57]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP58]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK9-NEXT: [[ADD:%.*]] = 
add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP57]] -// CHECK9-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP57]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP58]] +// CHECK9-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP58]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]], !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP58:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP59:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -5626,9 +5626,9 @@ // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr @Arg, align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF9:![0-9]+]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[ARG_CASTED]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 // CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -5698,9 +5698,9 @@ // CHECK11-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 // CHECK11-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 // CHECK11-NEXT: store i64 [[ARG]], ptr [[ARG_ADDR]], align 8 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[ARG_CASTED]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined., i64 [[TMP1]]) // CHECK11-NEXT: ret void // @@ -5727,48 +5727,48 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !nontemporal !10, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: store i32 [[TMP11]], ptr [[ARG_CASTED]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: [[TMP12:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP9]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !nontemporal !11, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: store i32 [[TMP11]], ptr [[ARG_CASTED]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i64, ptr [[ARG_CASTED]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]], i64 [[TMP12]]), !llvm.access.group [[ACC_GRP10]] // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK11-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -5800,55 +5800,55 @@ // CHECK11-NEXT: store i64 [[ARG]], ptr [[ARG_ADDR]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr 
[[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]] -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP14]] -// CHECK11-NEXT: store i32 0, ptr [[ARG_ADDR]], align 4, !nontemporal !10, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK11-NEXT: store i32 0, ptr [[ARG_ADDR]], align 4, !nontemporal !11, !llvm.access.group [[ACC_GRP15]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// 
CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -5885,49 +5885,49 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK11-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP19]] -// CHECK11-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP19]] -// CHECK11-NEXT: store i32 0, 
ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK11-NEXT: call void @.omp_outlined..3(ptr [[TMP11]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP19]] -// CHECK11-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP20]] +// CHECK11-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP20]] +// CHECK11-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK11-NEXT: call void @.omp_outlined..3(ptr [[TMP11]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP20]] +// CHECK11-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP20]] // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK11-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -5957,55 +5957,55 @@ // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// 
CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]] -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK11-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK11-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP23]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK11-NEXT: br label 
[[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -6054,16 +6054,16 @@ // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l85() #[[ATTR2]] -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr @Arg, align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK11-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK11-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK11-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK11-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP12]] to i1 // CHECK11-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL1]] to i8 // CHECK11-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK11-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK11-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK11-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP14]] to i1 // CHECK11-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK11: omp_if.then: @@ -6075,7 +6075,7 @@ // CHECK11-NEXT: store ptr null, ptr [[TMP17]], align 8 // CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP20:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK11-NEXT: [[TMP20:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP20]] to i1 // CHECK11-NEXT: [[TMP21:%.*]] = select i1 [[TOBOOL4]], i32 0, i32 1 // CHECK11-NEXT: [[KERNEL_ARGS6:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 @@ -6109,7 +6109,7 @@ // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l92(i64 [[TMP13]]) #[[ATTR2]] // CHECK11-NEXT: br label [[OMP_IF_END]] // CHECK11: omp_if.end: -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr @Arg, align 4 +// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP33]]) // CHECK11-NEXT: ret i32 [[CALL]] // @@ -6140,45 +6140,45 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr 
[[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]] -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..5, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP25]] +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..5, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP26]] // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK11-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -6208,55 +6208,55 @@ // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr 
[[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]] -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK11-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP28]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK11-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP29]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -6293,25 +6293,25 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr 
[[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: @@ -6326,16 +6326,16 @@ // CHECK11-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]) // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] // CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK11-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -6365,38 +6365,38 @@ // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 
-// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -6405,15 +6405,15 @@ // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label 
[[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -6429,11 +6429,11 @@ // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK11-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK11-NEXT: [[TMP0:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK11-NEXT: [[TMP0:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP0]] to i1 // CHECK11-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK11-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined..8, i64 [[TMP1]]) // CHECK11-NEXT: ret void // @@ -6463,69 +6463,69 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK11-NEXT: [[TMP5:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP5]] to i1 // CHECK11-NEXT: br i1 
[[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE5:%.*]] // CHECK11: omp_if.then: // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]] -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK11-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK11-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK11-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP34]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP12]] to i1 // CHECK11-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL2]] to i8 -// CHECK11-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1, !llvm.access.group [[ACC_GRP34]] -// CHECK11-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP34]] -// CHECK11-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP34]] +// CHECK11-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1, !llvm.access.group [[ACC_GRP35]] +// CHECK11-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP14]] to i1 // CHECK11-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN4:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK11: omp_if.then4: -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..9, i64 [[TMP9]], i64 [[TMP11]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP34]] +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..9, i64 [[TMP9]], i64 [[TMP11]], i64 [[TMP13]]), !llvm.access.group [[ACC_GRP35]] // CHECK11-NEXT: br label [[OMP_IF_END:%.*]] // CHECK11: omp_if.else: -// CHECK11-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP34]] -// CHECK11-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP34]] -// CHECK11-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK11-NEXT: call void @.omp_outlined..9(ptr [[TMP15]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]], i64 [[TMP13]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP34]] -// CHECK11-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP34]] +// CHECK11-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP35]] +// CHECK11-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK11-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK11-NEXT: call void @.omp_outlined..9(ptr [[TMP15]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]], i64 [[TMP13]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP35]] +// CHECK11-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP35]] // CHECK11-NEXT: br label [[OMP_IF_END]] // CHECK11: omp_if.end: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_IF_END20:%.*]] // CHECK11: omp_if.else5: // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND6:%.*]] // CHECK11: omp.inner.for.cond6: -// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]] // CHECK11-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY8:%.*]], label [[OMP_INNER_FOR_END19:%.*]] // CHECK11: omp.inner.for.body8: @@ -6533,12 +6533,12 @@ // CHECK11-NEXT: [[TMP21:%.*]] = zext i32 [[TMP20]] to i64 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK11-NEXT: [[TMP23:%.*]] = zext i32 [[TMP22]] to i64 -// CHECK11-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], 
align 1 +// CHECK11-NEXT: [[TMP24:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL9:%.*]] = trunc i8 [[TMP24]] to i1 // CHECK11-NEXT: [[FROMBOOL11:%.*]] = zext i1 [[TOBOOL9]] to i8 // CHECK11-NEXT: store i8 [[FROMBOOL11]], ptr [[DOTCAPTURE_EXPR__CASTED10]], align 1 -// CHECK11-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED10]], align 8 -// CHECK11-NEXT: [[TMP26:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK11-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED10]], align 8, !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP26:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL12:%.*]] = trunc i8 [[TMP26]] to i1 // CHECK11-NEXT: br i1 [[TOBOOL12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE14:%.*]] // CHECK11: omp_if.then13: @@ -6554,18 +6554,18 @@ // CHECK11: omp_if.end16: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK11: omp.inner.for.inc17: -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK11-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND6]], !llvm.loop [[LOOP37:![0-9]+]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND6]], !llvm.loop [[LOOP38:![0-9]+]] // CHECK11: omp.inner.for.end19: // CHECK11-NEXT: br label [[OMP_IF_END20]] // CHECK11: omp_if.end20: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP31:%.*]] = icmp ne i32 [[TMP30]], 0 // CHECK11-NEXT: br i1 [[TMP31]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -6597,81 +6597,81 @@ // CHECK11-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK11-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK11-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label 
[[OMP_IF_ELSE:%.*]] // CHECK11: omp_if.then: // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]] -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP38]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK11-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP38]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK11-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP39]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]] +// CHECK11-NEXT: store 
i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_IF_END:%.*]] // CHECK11: omp_if.else: // CHECK11-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP13]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP14]], 99 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]] // CHECK11: cond.true5: // CHECK11-NEXT: br label [[COND_END7:%.*]] // CHECK11: cond.false6: -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END7]] // CHECK11: cond.end7: // CHECK11-NEXT: [[COND8:%.*]] = phi i32 [ 99, [[COND_TRUE5]] ], [ [[TMP15]], [[COND_FALSE6]] ] // CHECK11-NEXT: store i32 [[COND8]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK11: omp.inner.for.cond9: -// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] // CHECK11-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END17:%.*]] // CHECK11: omp.inner.for.body11: -// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP19]], 1 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]] // CHECK11-NEXT: store i32 [[ADD13]], ptr [[I]], align 4 @@ -6680,19 +6680,19 @@ // CHECK11: omp.body.continue14: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]] // CHECK11: omp.inner.for.inc15: -// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP20]], 1 // CHECK11-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP41:![0-9]+]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP42:![0-9]+]] // CHECK11: omp.inner.for.end17: // CHECK11-NEXT: br label [[OMP_IF_END]] // CHECK11: omp_if.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP22:%.*]] = load 
i32, ptr [[TMP21]], align 4 +// CHECK11-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]]) -// CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 // CHECK11-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -6724,81 +6724,81 @@ // CHECK11-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK11-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK11-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK11: omp_if.then: // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42:![0-9]+]] -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr 
[[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP42]] -// CHECK11-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP42]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP43]] +// CHECK11-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP43]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_IF_END:%.*]] // CHECK11: omp_if.else: // CHECK11-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP13]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP14]], 99 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]] // CHECK11: cond.true5: // CHECK11-NEXT: br label [[COND_END7:%.*]] // CHECK11: cond.false6: -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END7]] // CHECK11: cond.end7: // CHECK11-NEXT: [[COND8:%.*]] = phi i32 [ 99, [[COND_TRUE5]] ], [ [[TMP15]], [[COND_FALSE6]] ] // CHECK11-NEXT: store i32 [[COND8]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP16]], ptr [[DOTOMP_IV]], align 4 // 
CHECK11-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK11: omp.inner.for.cond9: -// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] // CHECK11-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END17:%.*]] // CHECK11: omp.inner.for.body11: -// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP19]], 1 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]] // CHECK11-NEXT: store i32 [[ADD13]], ptr [[I]], align 4 @@ -6807,19 +6807,19 @@ // CHECK11: omp.body.continue14: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]] // CHECK11: omp.inner.for.inc15: -// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP20]], 1 // CHECK11-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP45:![0-9]+]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP46:![0-9]+]] // CHECK11: omp.inner.for.end17: // CHECK11-NEXT: br label [[OMP_IF_END]] // CHECK11: omp_if.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4 +// CHECK11-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]]) -// CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 // CHECK11-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -6868,15 +6868,15 @@ // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l66() #[[ATTR2]] -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK11-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK11-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK11-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK11-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP12]] to i1 // CHECK11-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL1]] to i8 // CHECK11-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK11-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK11-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] // 
CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: store i64 [[TMP13]], ptr [[TMP14]], align 8 // CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -6885,7 +6885,7 @@ // CHECK11-NEXT: store ptr null, ptr [[TMP16]], align 8 // CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP19:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK11-NEXT: [[TMP19:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP19]] to i1 // CHECK11-NEXT: [[TMP20:%.*]] = select i1 [[TOBOOL3]], i32 0, i32 1 // CHECK11-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 @@ -6943,45 +6943,45 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46:![0-9]+]] -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP47]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group 
[[ACC_GRP47]] // CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP47]] // CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..14, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP46]] +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..14, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP47]] // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]] -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP47]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK11-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -7011,55 +7011,55 @@ // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr 
[[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49:![0-9]+]] -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP49]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP50]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK11-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP49]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP50]] +// CHECK11-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP50]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP50]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr 
[[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -7096,25 +7096,25 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: @@ -7129,16 +7129,16 @@ // CHECK11-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]) // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] // CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP52:![0-9]+]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) 
-// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK11-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -7168,38 +7168,38 @@ // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw 
i32 [[TMP9]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4 @@ -7208,15 +7208,15 @@ // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -7232,11 +7232,11 @@ // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK11-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK11-NEXT: [[TMP0:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK11-NEXT: [[TMP0:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP0]] to i1 // CHECK11-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK11-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined..17, i64 [[TMP1]]) // CHECK11-NEXT: ret void // @@ -7263,58 +7263,58 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP54:![0-9]+]] -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP55]] // CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP55]] // CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK11-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP54]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP11]] to i1 // CHECK11-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK11: omp_if.then: -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..18, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP54]] +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..18, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP55]] // CHECK11-NEXT: br label [[OMP_IF_END:%.*]] // CHECK11: omp_if.else: -// CHECK11-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP54]] -// CHECK11-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP54]] -// CHECK11-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP54]] -// CHECK11-NEXT: call void @.omp_outlined..18(ptr [[TMP12]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP54]] -// CHECK11-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP54]] +// CHECK11-NEXT: call void @__kmpc_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP55]] +// CHECK11-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group [[ACC_GRP55]] +// CHECK11-NEXT: store i32 0, ptr [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group [[ACC_GRP55]] +// CHECK11-NEXT: call void @.omp_outlined..18(ptr [[TMP12]], ptr [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group [[ACC_GRP55]] +// CHECK11-NEXT: call void @__kmpc_end_serialized_parallel(ptr @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group [[ACC_GRP55]] // CHECK11-NEXT: br label [[OMP_IF_END]] // CHECK11: omp_if.end: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP54]] -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP54]] +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP55]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP54]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP55:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP55]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP56:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK11-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -7344,55 +7344,55 @@ // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP0:%.*]] = 
load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK11-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK11-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57:![0-9]+]] -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP57]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58:![0-9]+]], !noundef [[NOUNDEF9]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP58]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP57]] -// CHECK11-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP57]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP58]] +// CHECK11-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP58]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: 
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]], !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP57]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP58:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP58]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP59:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF9]] // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -7424,54 +7424,54 @@ // CHECK13-NEXT: [[I6:%.*]] = alloca i32, align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK13-NEXT: store i32 0, ptr @Arg, align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: store i32 0, ptr @Arg, align 4, !llvm.access.group [[ACC_GRP3]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: 
[[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK13-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 100, ptr [[I]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK13: omp.inner.for.cond7: -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]] -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK13-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK13: omp.inner.for.body9: -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK13-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK13-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK13-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP6]] +// CHECK13-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP7]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] // CHECK13: omp.body.continue12: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK13: omp.inner.for.inc13: -// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK13-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK13: omp.inner.for.end15: // CHECK13-NEXT: store i32 100, ptr [[I6]], align 4 // CHECK13-NEXT: ret void @@ -7500,87 +7500,87 @@ // CHECK13-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef 
[[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK13-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP9]] +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP10]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK13-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 100, ptr [[I]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK13: omp.inner.for.cond7: -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK13-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK13: omp.inner.for.body9: -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr 
[[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK13-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK13-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK13-NEXT: call void @_Z3fn5v(), !llvm.access.group [[ACC_GRP12]] +// CHECK13-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK13-NEXT: call void @_Z3fn5v(), !llvm.access.group [[ACC_GRP13]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] // CHECK13: omp.body.continue12: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK13: omp.inner.for.inc13: -// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK13-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK13: omp.inner.for.end15: // CHECK13-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr @Arg, align 4 +// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK13-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK13-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB17]], align 4 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB18]], align 4 -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4 +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV19]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] // CHECK13: omp.inner.for.cond21: -// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]] // CHECK13-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] // CHECK13: omp.inner.for.body23: -// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP14]], 1 // CHECK13-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] -// CHECK13-NEXT: store i32 [[ADD25]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK13-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP15]] +// 
CHECK13-NEXT: store i32 [[ADD25]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP16]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] // CHECK13: omp.body.continue26: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] // CHECK13: omp.inner.for.inc27: -// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK13-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK13: omp.inner.for.end29: // CHECK13-NEXT: store i32 100, ptr [[I20]], align 4 -// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr @Arg, align 4 +// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP16]]) // CHECK13-NEXT: ret i32 [[CALL]] // @@ -7608,84 +7608,84 @@ // CHECK13-NEXT: store i32 [[ARG]], ptr [[ARG_ADDR]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK13-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP18]] +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK13-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP19]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, 
!llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK13-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 100, ptr [[I]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK13: omp.inner.for.cond7: -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]] -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK13-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK13: omp.inner.for.body9: -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK13-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK13-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK13-NEXT: call void @_Z3fn2v(), !llvm.access.group [[ACC_GRP21]] +// CHECK13-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK13-NEXT: call void @_Z3fn2v(), !llvm.access.group [[ACC_GRP22]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] // CHECK13: omp.body.continue12: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK13: omp.inner.for.inc13: -// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK13-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK13: omp.inner.for.end15: // CHECK13-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 +// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK13-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // 
CHECK13-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB17]], align 4 // CHECK13-NEXT: store i32 99, ptr [[DOTOMP_UB18]], align 4 -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4 +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV19]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] // CHECK13: omp.inner.for.cond21: -// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]] -// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]] // CHECK13-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] // CHECK13: omp.inner.for.body23: -// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP14]], 1 // CHECK13-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] -// CHECK13-NEXT: store i32 [[ADD25]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK13-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: store i32 [[ADD25]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK13-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP25]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] // CHECK13: omp.body.continue26: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] // CHECK13: omp.inner.for.inc27: -// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK13-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK13: omp.inner.for.end29: // CHECK13-NEXT: store i32 100, ptr [[I20]], align 4 // CHECK13-NEXT: ret i32 0 @@ -7706,54 +7706,54 @@ // CHECK15-NEXT: [[I6:%.*]] = alloca i32, align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, 
!llvm.access.group [[ACC_GRP2]] +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK15-NEXT: store i32 0, ptr @Arg, align 4, !nontemporal !3, !llvm.access.group [[ACC_GRP2]] +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK15-NEXT: store i32 0, ptr @Arg, align 4, !nontemporal !4, !llvm.access.group [[ACC_GRP3]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK15-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 100, ptr [[I]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK15-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK15: omp.inner.for.cond7: -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]] -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK15-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK15: omp.inner.for.body9: -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK15-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// 
CHECK15-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK15-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP7]] +// CHECK15-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: call void @_Z9gtid_testv(), !llvm.access.group [[ACC_GRP8]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] // CHECK15: omp.body.continue12: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK15: omp.inner.for.inc13: -// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK15-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK15: omp.inner.for.end15: // CHECK15-NEXT: store i32 100, ptr [[I6]], align 4 // CHECK15-NEXT: ret void @@ -7782,42 +7782,42 @@ // CHECK15-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]] -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK15-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP10]] +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: call void @_Z3fn4v(), !llvm.access.group [[ACC_GRP11]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 
[[TMP4]], 1 -// CHECK15-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 100, ptr [[I]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK15-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK15: omp.inner.for.cond7: -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4 -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4 +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !noundef [[NOUNDEF2]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK15-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK15: omp.inner.for.body9: -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4 +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK15-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] // CHECK15-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4 @@ -7826,55 +7826,55 @@ // CHECK15: omp.body.continue12: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK15: omp.inner.for.inc13: -// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4 +// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK15-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK15: omp.inner.for.end15: // CHECK15-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr @Arg, align 4 +// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK15-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK15-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB17]], align 4 // CHECK15-NEXT: store i32 99, ptr [[DOTOMP_UB18]], align 4 -// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4 +// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV19]], align 4 -// CHECK15-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK15-NEXT: [[TMP12:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[TOBOOL21:%.*]] = trunc i8 [[TMP12]] to i1 // CHECK15-NEXT: br i1 [[TOBOOL21]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK15: omp_if.then: // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND22:%.*]] // CHECK15: 
omp.inner.for.cond22: -// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[CMP23:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK15-NEXT: br i1 [[CMP23]], label [[OMP_INNER_FOR_BODY24:%.*]], label [[OMP_INNER_FOR_END30:%.*]] // CHECK15: omp.inner.for.body24: -// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[MUL25:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK15-NEXT: [[ADD26:%.*]] = add nsw i32 0, [[MUL25]] -// CHECK15-NEXT: store i32 [[ADD26]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK15-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP15]] +// CHECK15-NEXT: store i32 [[ADD26]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: call void @_Z3fn6v(), !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE27:%.*]] // CHECK15: omp.body.continue27: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC28:%.*]] // CHECK15: omp.inner.for.inc28: -// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK15-NEXT: store i32 [[ADD29]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND22]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD29]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND22]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK15: omp.inner.for.end30: // CHECK15-NEXT: br label [[OMP_IF_END:%.*]] // CHECK15: omp_if.else: // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND31:%.*]] // CHECK15: omp.inner.for.cond31: -// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4 -// CHECK15-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4 +// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !noundef [[NOUNDEF2]] +// CHECK15-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[CMP32:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] // CHECK15-NEXT: br i1 [[CMP32]], label [[OMP_INNER_FOR_BODY33:%.*]], label [[OMP_INNER_FOR_END39:%.*]] // CHECK15: omp.inner.for.body33: -// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4 +// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[MUL34:%.*]] = mul nsw i32 [[TMP19]], 1 // CHECK15-NEXT: [[ADD35:%.*]] = add nsw i32 0, [[MUL34]] // CHECK15-NEXT: store i32 [[ADD35]], ptr [[I20]], align 4 @@ -7883,15 +7883,15 @@ // CHECK15: omp.body.continue36: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC37:%.*]] // CHECK15: omp.inner.for.inc37: -// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4 +// 
CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[ADD38:%.*]] = add nsw i32 [[TMP20]], 1 // CHECK15-NEXT: store i32 [[ADD38]], ptr [[DOTOMP_IV19]], align 4 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND31]], !llvm.loop [[LOOP18:![0-9]+]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND31]], !llvm.loop [[LOOP19:![0-9]+]] // CHECK15: omp.inner.for.end39: // CHECK15-NEXT: br label [[OMP_IF_END]] // CHECK15: omp_if.end: // CHECK15-NEXT: store i32 100, ptr [[I20]], align 4 -// CHECK15-NEXT: [[TMP21:%.*]] = load i32, ptr @Arg, align 4 +// CHECK15-NEXT: [[TMP21:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP21]]) // CHECK15-NEXT: ret i32 [[CALL]] // @@ -7919,42 +7919,42 @@ // CHECK15-NEXT: store i32 [[ARG]], ptr [[ARG_ADDR]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK15-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP19]] +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK15-NEXT: call void @_Z3fn1v(), !llvm.access.group [[ACC_GRP20]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK15-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 100, ptr [[I]], align 4 // 
CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK15-NEXT: store i32 99, ptr [[DOTOMP_UB4]], align 4 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK15: omp.inner.for.cond7: -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4 -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4 +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !noundef [[NOUNDEF2]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK15-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK15: omp.inner.for.body9: -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4 +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK15-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] // CHECK15-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4 @@ -7963,40 +7963,40 @@ // CHECK15: omp.body.continue12: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK15: omp.inner.for.inc13: -// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4 +// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK15-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK15: omp.inner.for.end15: // CHECK15-NEXT: store i32 100, ptr [[I6]], align 4 -// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 +// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK15-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK15-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB17]], align 4 // CHECK15-NEXT: store i32 99, ptr [[DOTOMP_UB18]], align 4 -// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4 +// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB17]], align 4, !noundef [[NOUNDEF2]] // CHECK15-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV19]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] // CHECK15: omp.inner.for.cond21: -// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]] -// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB18]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]] // CHECK15-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] // CHECK15: omp.inner.for.body23: -// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, 
!llvm.access.group [[ACC_GRP23]] +// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP14]], 1 // CHECK15-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] -// CHECK15-NEXT: store i32 [[ADD25]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP23]] -// CHECK15-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP23]] +// CHECK15-NEXT: store i32 [[ADD25]], ptr [[I20]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK15-NEXT: call void @_Z3fn3v(), !llvm.access.group [[ACC_GRP24]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] // CHECK15: omp.body.continue26: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] // CHECK15: omp.inner.for.inc27: -// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24]], !noundef [[NOUNDEF2]] // CHECK15-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK15-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP23]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP24:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD28]], ptr [[DOTOMP_IV19]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP25:![0-9]+]] // CHECK15: omp.inner.for.end29: // CHECK15-NEXT: store i32 100, ptr [[I20]], align 4 // CHECK15-NEXT: ret i32 0 diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_proc_bind_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_proc_bind_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_proc_bind_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_proc_bind_codegen.cpp @@ -137,46 +137,46 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6:![0-9]+]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label 
[[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 4), !llvm.access.group !6 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !6 +// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 4), !llvm.access.group [[ACC_GRP7]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !6 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP7]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !6 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -206,54 +206,54 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], 
align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: 
[[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -289,46 +289,46 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 3), !llvm.access.group !15 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], 
align 4, !llvm.access.group !15 +// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 3), !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..3, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !15 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..3, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP16]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !15 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -358,54 +358,54 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, 
!noundef [[NOUNDEF6]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !18 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !18 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // 
CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -474,46 +474,46 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !21 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 2), !llvm.access.group !21 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !21 +// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 2), !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP22]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !21 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP22]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..5, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !21 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..5, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP22]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !21 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -543,54 +543,54 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label 
[[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !24 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !24 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -624,52 +624,52 @@ // CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// 
CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: store i32 1000, ptr [[I]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK3-NEXT: store i32 999, ptr [[DOTOMP_UB4]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK3: omp.inner.for.cond7: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK3: omp.inner.for.body9: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK3-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] // CHECK3: omp.body.continue12: // 
CHECK3-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK3: omp.inner.for.inc13: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK3: omp.inner.for.end15: // CHECK3-NEXT: store i32 1000, ptr [[I6]], align 4 // CHECK3-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v() @@ -686,27 +686,27 @@ // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: store i32 1000, ptr [[I]], align 4 // CHECK3-NEXT: ret i32 0 diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_reduction_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_reduction_codegen.cpp --- 
a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_reduction_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_reduction_codegen.cpp @@ -101,34 +101,34 @@ // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP0]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP1]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 2, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK1-NEXT: br i1 [[TMP17]], label 
[[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 2, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK1-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66(ptr @_ZZ4mainE5sivar) #[[ATTR2:[0-9]+]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -172,45 +172,45 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // 
CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..1, i64 [[TMP9]], i64 [[TMP11]], ptr [[SIVAR1]]), !llvm.access.group !5 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..1, i64 [[TMP9]], i64 [[TMP11]], ptr [[SIVAR1]]), !llvm.access.group [[ACC_GRP6]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -219,21 +219,21 @@ // CHECK1: .omp.final.done: // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 
// CHECK1-NEXT: store ptr [[SIVAR1]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] // CHECK1-NEXT: store i32 [[ADD3]], ptr [[TMP0]], align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK1-NEXT: [[TMP23:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP22]] monotonic, align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: ret void @@ -264,9 +264,9 @@ // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 @@ -274,49 +274,49 @@ // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store i32 0, ptr [[SIVAR2]], align 4 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // 
CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[SIVAR2]], align 4, !llvm.access.group !9 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[SIVAR2]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], [[TMP11]] -// CHECK1-NEXT: store i32 [[ADD4]], ptr [[SIVAR2]], align 4, !llvm.access.group !9 +// CHECK1-NEXT: store i32 [[ADD4]], ptr [[SIVAR2]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // 
CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -325,21 +325,21 @@ // CHECK1: .omp.final.done: // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK1-NEXT: store ptr [[SIVAR2]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[SIVAR2]], align 4 -// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR2]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] // CHECK1-NEXT: store i32 [[ADD6]], ptr [[TMP0]], align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[SIVAR2]], align 4 -// CHECK1-NEXT: [[TMP23:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP22]] monotonic, align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR2]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: ret void @@ -353,15 +353,15 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: 
[[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK1-NEXT: ret void // // @@ -373,15 +373,15 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK1-NEXT: ret void // // @@ -396,36 +396,36 @@ // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store i32 0, ptr [[T_VAR]], align 4 // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i64 8, i1 false) -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP0]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// 
CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.7, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 2, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 -// CHECK1-NEXT: br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.7, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 2, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK1-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(ptr [[T_VAR]]) #[[ATTR2]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -468,45 +468,45 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !14 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14 +// CHECK1-NEXT: 
[[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..4, i64 [[TMP9]], i64 [[TMP11]], ptr [[T_VAR1]]), !llvm.access.group !14 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..4, i64 [[TMP9]], i64 [[TMP11]], ptr [[T_VAR1]]), !llvm.access.group [[ACC_GRP15]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !14 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -515,21 +515,21 @@ // CHECK1: .omp.final.done: // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK1-NEXT: store ptr [[T_VAR1]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.6, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.6, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] // CHECK1-NEXT: store i32 [[ADD3]], ptr [[TMP0]], align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr 
@[[GLOB3]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK1-NEXT: [[TMP23:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP22]] monotonic, align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: ret void @@ -560,9 +560,9 @@ // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 @@ -570,49 +570,49 @@ // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store i32 0, ptr [[T_VAR2]], align 4 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // 
CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !17 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !17 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], [[TMP11]] -// CHECK1-NEXT: store i32 [[ADD4]], ptr [[T_VAR2]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: store i32 [[ADD4]], ptr [[T_VAR2]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -621,21 +621,21 @@ // CHECK1: .omp.final.done: // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK1-NEXT: store ptr [[T_VAR2]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.5, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.5, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label 
[[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[T_VAR2]], align 4 -// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[T_VAR2]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] // CHECK1-NEXT: store i32 [[ADD6]], ptr [[TMP0]], align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[T_VAR2]], align 4 -// CHECK1-NEXT: [[TMP23:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP22]] monotonic, align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR2]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: ret void @@ -649,15 +649,15 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK1-NEXT: ret void // // @@ -669,15 +669,15 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x 
ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK1-NEXT: ret void // // @@ -699,34 +699,34 @@ // CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP2]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, 
ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 2, ptr [[TMP15]], align 8 -// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 2, ptr [[TMP13]], align 8 +// CHECK3-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK3-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l66(ptr @_ZZ4mainE5sivar) #[[ATTR2:[0-9]+]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -770,43 +770,43 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6:![0-9]+]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 
1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..1, i32 [[TMP8]], i32 [[TMP9]], ptr [[SIVAR1]]), !llvm.access.group !6 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..1, i32 [[TMP8]], i32 [[TMP9]], ptr [[SIVAR1]]), !llvm.access.group [[ACC_GRP7]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -815,21 +815,21 @@ // CHECK3: .omp.final.done: // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[SIVAR1]], ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) -// CHECK3-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) +// CHECK3-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK3-NEXT: ] // CHECK3: .omp.reduction.case1: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] // CHECK3-NEXT: store i32 [[ADD3]], ptr [[TMP0]], align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.case2: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[SIVAR1]], 
align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP19:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.default: // CHECK3-NEXT: ret void @@ -860,57 +860,57 @@ // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: store i32 0, ptr [[SIVAR1]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !10 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 
0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !10 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group !10 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]] -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group !10 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK3-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -919,21 +919,21 @@ // CHECK3: .omp.final.done: // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[SIVAR1]], ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK3-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK3-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK3-NEXT: ] // CHECK3: .omp.reduction.case1: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: 
[[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] // CHECK3-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.case2: -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP22]] monotonic, align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.default: // CHECK3-NEXT: ret void @@ -947,15 +947,15 @@ // CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK3-NEXT: ret void // // @@ -967,15 +967,15 @@ // CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load 
i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK3-NEXT: ret void // // @@ -990,36 +990,36 @@ // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 0, ptr [[T_VAR]], align 4 // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i32 8, i1 false) -// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP1]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP3]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes.7, ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 2, ptr [[TMP16]], align 8 -// 
CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 -// CHECK3-NEXT: br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.7, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 2, ptr [[TMP13]], align 8 +// CHECK3-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK3-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(ptr [[T_VAR]]) #[[ATTR2]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1062,43 +1062,43 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, 
ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !15 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..4, i32 [[TMP8]], i32 [[TMP9]], ptr [[T_VAR1]]), !llvm.access.group !15 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..4, i32 [[TMP8]], i32 [[TMP9]], ptr [[T_VAR1]]), !llvm.access.group [[ACC_GRP16]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !15 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -1107,21 +1107,21 @@ // CHECK3: .omp.final.done: // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[T_VAR1]], ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.6, ptr @.gomp_critical_user_.reduction.var) -// CHECK3-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.6, ptr @.gomp_critical_user_.reduction.var) +// CHECK3-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK3-NEXT: ] // CHECK3: .omp.reduction.case1: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] // CHECK3-NEXT: store i32 [[ADD3]], ptr [[TMP0]], align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.case2: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[T_VAR1]], align 
4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP19:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.default: // CHECK3-NEXT: ret void @@ -1152,57 +1152,57 @@ // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: store i32 0, ptr [[T_VAR1]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 
-// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]] -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK3-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -1211,21 +1211,21 @@ // CHECK3: .omp.final.done: // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[T_VAR1]], ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.5, ptr @.gomp_critical_user_.reduction.var) -// CHECK3-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.5, ptr @.gomp_critical_user_.reduction.var) +// CHECK3-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK3-NEXT: ] // CHECK3: .omp.reduction.case1: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: 
[[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] // CHECK3-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.case2: -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP22]] monotonic, align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.default: // CHECK3-NEXT: ret void @@ -1239,15 +1239,15 @@ // CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK3-NEXT: ret void // // @@ -1259,15 +1259,15 @@ // CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = 
load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK3-NEXT: ret void // // @@ -1323,45 +1323,45 @@ // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF4:![0-9]+]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK5-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !4 +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK5-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..1, i64 [[TMP9]], i64 [[TMP11]], ptr [[SIVAR1]]), !llvm.access.group !4 +// CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..1, i64 [[TMP9]], i64 [[TMP11]], ptr [[SIVAR1]]), !llvm.access.group [[ACC_GRP5]] // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK5-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK5-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -1370,21 +1370,21 @@ // CHECK5: .omp.final.done: // CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK5-NEXT: store ptr [[SIVAR1]], ptr [[TMP16]], align 8 -// CHECK5-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) -// CHECK5-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK5-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) +// CHECK5-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK5-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK5-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK5-NEXT: ] // CHECK5: .omp.reduction.case1: -// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF4]] +// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF4]] +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] // CHECK5-NEXT: store i32 [[ADD3]], ptr [[TMP0]], align 4 // CHECK5-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK5: .omp.reduction.case2: -// CHECK5-NEXT: [[TMP22:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK5-NEXT: [[TMP23:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP22]] monotonic, align 4 +// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], 
align 4, !noundef [[NOUNDEF4]] +// CHECK5-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 // CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK5: .omp.reduction.default: // CHECK5-NEXT: ret void @@ -1416,9 +1416,9 @@ // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK5-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK5-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32 // CHECK5-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 @@ -1426,52 +1426,52 @@ // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: store i32 0, ptr [[SIVAR2]], align 4 // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF4]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK5-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK5-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK5-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK5-NEXT: 
[[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !8 -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !8 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[SIVAR2]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[SIVAR2]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], [[TMP11]] -// CHECK5-NEXT: store i32 [[ADD4]], ptr [[SIVAR2]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: store i32 [[ADD4]], ptr [[SIVAR2]], align 4, !llvm.access.group [[ACC_GRP9]] // CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 0 -// CHECK5-NEXT: store ptr [[SIVAR2]], ptr [[TMP13]], align 8, !llvm.access.group !8 -// CHECK5-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]]), !llvm.access.group !8 +// CHECK5-NEXT: store ptr [[SIVAR2]], ptr [[TMP13]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]]), !llvm.access.group [[ACC_GRP9]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK5-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], 1 -// CHECK5-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK5-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK5-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -1480,21 +1480,21 @@ // CHECK5: .omp.final.done: // CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK5-NEXT: store ptr [[SIVAR2]], ptr [[TMP17]], align 8 -// CHECK5-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK5-NEXT: switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK5-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr 
@.gomp_critical_user_.reduction.var) +// CHECK5-NEXT: switch i32 [[TMP18]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK5-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK5-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK5-NEXT: ] // CHECK5: .omp.reduction.case1: -// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK5-NEXT: [[TMP22:%.*]] = load i32, ptr [[SIVAR2]], align 4 -// CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]] +// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF4]] +// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR2]], align 4, !noundef [[NOUNDEF4]] +// CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] // CHECK5-NEXT: store i32 [[ADD6]], ptr [[TMP0]], align 4 // CHECK5-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], ptr @.gomp_critical_user_.reduction.var) // CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK5: .omp.reduction.case2: -// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[SIVAR2]], align 4 -// CHECK5-NEXT: [[TMP24:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP23]] monotonic, align 4 +// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[SIVAR2]], align 4, !noundef [[NOUNDEF4]] +// CHECK5-NEXT: [[TMP22:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP21]] monotonic, align 4 // CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK5: .omp.reduction.default: // CHECK5-NEXT: ret void @@ -1508,15 +1508,15 @@ // CHECK5-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK5-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK5-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK5-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF4]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF4]] +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK5-NEXT: ret void // // @@ -1528,15 +1528,15 @@ // CHECK5-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK5-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK5-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// 
CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK5-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF4]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF4]] +// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK5-NEXT: ret void // // @@ -1560,36 +1560,36 @@ // CHECK7-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: store i32 0, ptr [[SIVAR]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group !2 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] -// CHECK7-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group !2 +// CHECK7-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, 
!llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4, !noundef [[NOUNDEF2]] // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK7-NEXT: store i32 [[ADD3]], ptr @_ZZ4mainE5sivar, align 4 // CHECK7-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v() @@ -1611,37 +1611,37 @@ // CHECK7-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i64 8, i1 false) // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK7-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: store i32 0, ptr [[T_VAR1]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !6 -// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !6 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !6 -// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]] -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group !6 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// 
CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1 -// CHECK7-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP6]], 1 +// CHECK7-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF2]] +// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK7-NEXT: store i32 [[ADD4]], ptr [[T_VAR]], align 4 // CHECK7-NEXT: ret i32 0 // @@ -1659,36 +1659,36 @@ // CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: store i32 0, ptr [[SIVAR]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !3 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !3 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group !3 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] 
+// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK9-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] -// CHECK9-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group !3 +// CHECK9-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK9-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF3]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4, !noundef [[NOUNDEF3]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: store i32 [[ADD3]], ptr @_ZZ4mainE5sivar, align 4 // CHECK9-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v() @@ -1710,37 +1710,37 @@ // CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i32 8, i1 false) // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] +// CHECK9-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: store i32 0, ptr [[T_VAR1]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 -// CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, 
!llvm.access.group !7 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !7 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !7 -// CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]] -// CHECK9-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group !7 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] +// CHECK9-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP6]], 1 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF3]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF3]] +// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: store i32 [[ADD4]], ptr [[T_VAR]], align 4 // CHECK9-NEXT: ret i32 0 // diff --git a/clang/test/OpenMP/target_teams_distribute_simd_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_simd_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_simd_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_simd_codegen.cpp @@ -336,30 +336,30 @@ // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 // CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]] // CHECK1-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8 // CHECK1-NEXT: store i64 [[TMP5]], ptr 
[[__VLA_EXPR1]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP9]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP13]], ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP10]], ptr [[TMP15]], align 8 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -381,18 +381,18 @@ // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP27:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP27:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP27]], ptr [[TMP26]], align 4 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP29]], ptr [[TMP28]], align 4 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP31]], ptr [[TMP30]], align 4 // CHECK1-NEXT: [[TMP32:%.*]] = call ptr @__kmpc_omp_target_task_alloc(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i64 120, i64 12, ptr @.omp_task_entry., i64 -1) // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP32]], i32 0, i32 0 // CHECK1-NEXT: 
[[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP33]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP34]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP35]], ptr align 4 [[AGG_CAPTURED]], i64 12, i1 false) // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP32]], i32 0, i32 1 // CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP36]], i32 0, i32 0 @@ -402,16 +402,16 @@ // CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP36]], i32 0, i32 2 // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP39]], ptr align 8 @.offload_sizes, i64 24, i1 false) // CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP36]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP41]], ptr [[TMP40]], align 8 // CHECK1-NEXT: [[TMP42:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP0]], ptr [[TMP32]]) -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP43]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP44:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i64 [[TMP44]]) #[[ATTR4:[0-9]+]] -// CHECK1-NEXT: [[TMP45:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP45:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP45]], ptr [[AA_CASTED4]], align 2 -// CHECK1-NEXT: [[TMP46:%.*]] = load i64, ptr [[AA_CASTED4]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = load i64, ptr [[AA_CASTED4]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP46]], ptr [[TMP47]], align 8 // CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 @@ -446,13 +446,13 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111(i64 [[TMP46]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP63:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP63:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP63]], ptr [[A_CASTED8]], align 4 -// CHECK1-NEXT: [[TMP64:%.*]] = load i64, ptr [[A_CASTED8]], align 8 -// CHECK1-NEXT: [[TMP65:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP64:%.*]] = load i64, ptr [[A_CASTED8]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP65:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP65]], ptr [[AA_CASTED9]], align 2 -// CHECK1-NEXT: [[TMP66:%.*]] = load i64, ptr [[AA_CASTED9]], align 8 -// CHECK1-NEXT: [[TMP67:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP66:%.*]] = load i64, ptr [[AA_CASTED9]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP67:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // 
CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP67]], 10 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -501,10 +501,10 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i64 [[TMP64]], i64 [[TMP66]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP87]], ptr [[A_CASTED17]], align 4 -// CHECK1-NEXT: [[TMP88:%.*]] = load i64, ptr [[A_CASTED17]], align 8 -// CHECK1-NEXT: [[TMP89:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP88:%.*]] = load i64, ptr [[A_CASTED17]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP89:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP18:%.*]] = icmp sgt i32 [[TMP89]], 20 // CHECK1-NEXT: br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE27:%.*]] // CHECK1: omp_if.then19: @@ -604,7 +604,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP88]], ptr [[B]], i64 [[TMP2]], ptr [[VLA]], ptr [[C]], i64 5, i64 [[TMP5]], ptr [[VLA1]], ptr [[D]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END28]] // CHECK1: omp_if.end28: -// CHECK1-NEXT: [[TMP136:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP136:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP137:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP137]]) // CHECK1-NEXT: ret i32 [[TMP136]] @@ -621,12 +621,12 @@ // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., i64 [[TMP4]]) // CHECK1-NEXT: ret void // @@ -652,45 +652,45 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// 
CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK1-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -750,64 +750,64 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !26 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !26 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !26 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !26 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !27 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !27 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], 
align 8, !noalias !27 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !27 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !27 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !27 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !27 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !27 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !27 // CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]] -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !26 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !26 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !26 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !26 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !27 +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !27 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !27 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !27 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK1-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !26 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !27 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK1-NEXT: store i32 3, ptr [[TMP19]], align 4, !noalias !26 +// CHECK1-NEXT: store i32 3, ptr [[TMP19]], align 4, !noalias !27 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP13]], ptr [[TMP20]], align 8, !noalias !26 +// CHECK1-NEXT: store ptr [[TMP13]], ptr [[TMP20]], align 8, !noalias !27 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP14]], ptr [[TMP21]], align 8, !noalias !26 +// CHECK1-NEXT: store ptr [[TMP14]], ptr [[TMP21]], align 8, !noalias !27 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK1-NEXT: store ptr [[TMP15]], ptr [[TMP22]], align 8, !noalias !26 +// CHECK1-NEXT: store ptr [[TMP15]], ptr [[TMP22]], align 8, !noalias !27 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 8, !noalias !26 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 8, !noalias !27 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP24]], align 8, !noalias !26 +// CHECK1-NEXT: store ptr null, ptr [[TMP24]], align 8, !noalias !27 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8, !noalias !26 +// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8, !noalias !27 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK1-NEXT: store i64 10, ptr [[TMP26]], align 8, !noalias !26 +// CHECK1-NEXT: store i64 10, ptr [[TMP26]], align 8, !noalias !27 // CHECK1-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB2]], i64 -1, i32 [[TMP18]], i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK1-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK1-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK1: omp_offload.failed.i: -// CHECK1-NEXT: [[TMP29:%.*]] = load i16, ptr [[TMP12]], align 2 -// CHECK1-NEXT: store i16 [[TMP29]], ptr [[AA_CASTED_I]], align 2, !noalias !26 -// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[AA_CASTED_I]], align 8, !noalias !26 -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK1-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !26 -// CHECK1-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 8, !noalias !26 -// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK1-NEXT: store i32 [[TMP33]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !26 -// CHECK1-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 8, !noalias !26 +// CHECK1-NEXT: [[TMP29:%.*]] = load i16, ptr [[TMP12]], align 2, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: store i16 [[TMP29]], ptr [[AA_CASTED_I]], align 2, !noalias !27 +// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[AA_CASTED_I]], align 8, !noalias !27, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !27 +// CHECK1-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 8, !noalias !27, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: store i32 [[TMP33]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !27 +// CHECK1-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 8, !noalias !27, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97(i64 [[TMP30]], i64 [[TMP32]], i64 [[TMP34]]) #[[ATTR4]] // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK1: .omp_outlined..1.exit: @@ -820,9 +820,9 @@ // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], 
align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..2, i64 [[TMP1]]) // CHECK1-NEXT: ret void // @@ -848,48 +848,48 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[A1]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK1-NEXT: store i32 [[ADD3]], ptr [[A1]], align 4 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] // CHECK1: omp.inner.for.end: // 
CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK1-NEXT: br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -905,9 +905,9 @@ // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..3, i64 [[TMP1]]) // CHECK1-NEXT: ret void // @@ -933,50 +933,50 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: 
[[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP29]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK1-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP29]] +// CHECK1-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK1-NEXT: br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -995,12 +995,12 @@ // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..6, i64 [[TMP1]], i64 [[TMP3]]) // CHECK1-NEXT: ret void // @@ -1028,53 +1028,53 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]] -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK1-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP32]] +// CHECK1-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr 
[[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK1-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP32]] +// CHECK1-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1107,16 +1107,16 @@ // CHECK1-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 9, ptr @.omp_outlined..9, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK1-NEXT: ret void // @@ -1154,11 +1154,11 @@ // CHECK1-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 0 @@ -1168,81 +1168,81 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 
// CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK1-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = fpext float [[TMP17]] to double // CHECK1-NEXT: [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK1-NEXT: [[CONV8:%.*]] = fptrunc double [[ADD7]] to float -// CHECK1-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK1-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV10:%.*]] = fpext float [[TMP18]] to double // CHECK1-NEXT: [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00 // CHECK1-NEXT: [[CONV12:%.*]] = fptrunc double [[ADD11]] to float -// CHECK1-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK1-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK1-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX13]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00 -// CHECK1-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK1-NEXT: [[TMP20:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK1-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP20]] // CHECK1-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX16]], i64 3 -// CHECK1-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group 
[[ACC_GRP35]] +// CHECK1-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00 -// CHECK1-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK1-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1 -// CHECK1-NEXT: store i64 [[ADD19]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store i64 [[ADD19]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK1-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV20:%.*]] = sext i8 [[TMP23]] to i32 // CHECK1-NEXT: [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1 // CHECK1-NEXT: [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8 -// CHECK1-NEXT: store i8 [[CONV22]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: store i8 [[CONV22]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK1-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP9]]) -// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK1-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1260,27 +1260,27 @@ // CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, 
!noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK1-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK1-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK1-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP8]] // // @@ -1301,20 +1301,20 @@ // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK1-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60 // CHECK1-NEXT: br i1 [[CMP]], 
label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1391,9 +1391,9 @@ // CHECK1-NEXT: [[TMP40:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP40]] // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2 +// CHECK1-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP41]] to i32 -// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP42]] // CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) @@ -1423,19 +1423,19 @@ // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK1-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP2]], ptr [[N_CASTED]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP4]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i8 [[TMP6]], ptr [[AAA_CASTED]], align 1 -// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 50 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1471,19 +1471,19 @@ // CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP26]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // 
CHECK1-NEXT: store i32 [[TMP27]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP28]], [[TMP29]] // CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD5:%.*]] = add i32 [[TMP30]], 1 // CHECK1-NEXT: [[TMP31:%.*]] = zext i32 [[ADD5]] to i64 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -1517,7 +1517,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], i64 [[TMP7]], ptr [[B]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP43]] // // @@ -1537,13 +1537,13 @@ // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, ptr [[A]], align 4 // CHECK1-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: @@ -1598,7 +1598,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i64 [[TMP1]], i64 [[TMP3]], ptr [[B]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: ret i32 [[TMP27]] // // @@ -1616,13 +1616,13 @@ // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr 
[[VLA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..12, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]]) // CHECK1-NEXT: ret void // @@ -1651,68 +1651,68 @@ // CHECK1-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]] -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK1-NEXT: [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK1-NEXT: store double [[ADD4]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: store double [[ADD4]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP39]] // CHECK1-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 -// CHECK1-NEXT: store double [[INC]], ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: store double [[INC]], ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP39]] // CHECK1-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 // CHECK1-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i64 [[TMP14]] // CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK1-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP39]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK1-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, 
!llvm.access.group [[ACC_GRP39]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -1740,18 +1740,18 @@ // CHECK1-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP5]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i8 [[TMP7]], ptr [[AAA_CASTED]], align 1 -// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..15, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -1785,99 +1785,99 @@ // CHECK1-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]] // CHECK1-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK1-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK1-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1: omp.precond.then: // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP10]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP6:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]] // CHECK1-NEXT: br i1 
[[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41:![0-9]+]] -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP42]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD7:%.*]] = add i32 [[TMP17]], 1 // CHECK1-NEXT: [[CMP8:%.*]] = icmp ult i32 [[TMP16]], [[ADD7]] // CHECK1-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP42]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul i32 [[TMP19]], 1 // CHECK1-NEXT: [[ADD9:%.*]] = add i32 [[TMP18]], [[MUL]] -// CHECK1-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP42]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1 -// CHECK1-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK1-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP42]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP21]] to i32 // CHECK1-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV12:%.*]] = trunc i32 [[ADD11]] to i16 -// CHECK1-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], 
align 1, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP42]] +// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP42]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV13:%.*]] = sext i8 [[TMP22]] to i32 // CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV13]], 1 // CHECK1-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 -// CHECK1-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP42]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP42]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK1-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP42]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD17:%.*]] = add i32 [[TMP24]], 1 -// CHECK1-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK1-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[SUB18:%.*]] = sub i32 [[TMP30]], [[TMP31]] // CHECK1-NEXT: [[SUB19:%.*]] = sub i32 [[SUB18]], 1 // CHECK1-NEXT: [[ADD20:%.*]] = add i32 [[SUB19]], 1 @@ -1904,12 +1904,12 @@ // CHECK1-NEXT: store i64 
[[AA]], ptr [[AA_ADDR]], align 8 // CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..18, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -1940,57 +1940,57 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44:![0-9]+]] -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr
[[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK1-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP44]] +// CHECK1-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK1-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP44]] +// CHECK1-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP45]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK1-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK1-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP45]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -2050,28 +2050,28 @@ // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK3-NEXT: 
[[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11:![0-9]+]] // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]] // CHECK3-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8 // CHECK3-NEXT: store i32 [[TMP3]], ptr [[__VLA_EXPR1]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP7:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP7]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP9]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP8]], ptr [[TMP13]], align 4 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -2093,18 +2093,18 @@ // CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP25:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP25:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP25]], ptr [[TMP24]], align 4 // CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef 
[[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP27]], ptr [[TMP26]], align 4 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP29]], ptr [[TMP28]], align 4 // CHECK3-NEXT: [[TMP30:%.*]] = call ptr @__kmpc_omp_target_task_alloc(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 72, i32 12, ptr @.omp_task_entry., i64 -1) // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP30]], i32 0, i32 0 // CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP31]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP33:%.*]] = load ptr, ptr [[TMP32]], align 4 +// CHECK3-NEXT: [[TMP33:%.*]] = load ptr, ptr [[TMP32]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP33]], ptr align 4 [[AGG_CAPTURED]], i32 12, i1 false) // CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP30]], i32 0, i32 1 // CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP34]], i32 0, i32 0 @@ -2114,16 +2114,16 @@ // CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP34]], i32 0, i32 2 // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP37]], ptr align 4 [[TMP23]], i32 12, i1 false) // CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP34]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP39:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP39:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP39]], ptr [[TMP38]], align 4 // CHECK3-NEXT: [[TMP40:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP0]], ptr [[TMP30]]) -// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP41]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l104(i32 [[TMP42]]) #[[ATTR4:[0-9]+]] -// CHECK3-NEXT: [[TMP43:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP43:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP43]], ptr [[AA_CASTED4]], align 2 -// CHECK3-NEXT: [[TMP44:%.*]] = load i32, ptr [[AA_CASTED4]], align 4 +// CHECK3-NEXT: [[TMP44:%.*]] = load i32, ptr [[AA_CASTED4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP44]], ptr [[TMP45]], align 4 // CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 @@ -2158,13 +2158,13 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111(i32 [[TMP44]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK3: omp_offload.cont: -// CHECK3-NEXT: [[TMP61:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP61:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP61]], ptr [[A_CASTED8]], align 4 
-// CHECK3-NEXT: [[TMP62:%.*]] = load i32, ptr [[A_CASTED8]], align 4 -// CHECK3-NEXT: [[TMP63:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP62:%.*]] = load i32, ptr [[A_CASTED8]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP63:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP63]], ptr [[AA_CASTED9]], align 2 -// CHECK3-NEXT: [[TMP64:%.*]] = load i32, ptr [[AA_CASTED9]], align 4 -// CHECK3-NEXT: [[TMP65:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP64:%.*]] = load i32, ptr [[AA_CASTED9]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP65:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP65]], 10 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -2213,10 +2213,10 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i32 [[TMP62]], i32 [[TMP64]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP85:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP85:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP85]], ptr [[A_CASTED17]], align 4 -// CHECK3-NEXT: [[TMP86:%.*]] = load i32, ptr [[A_CASTED17]], align 4 -// CHECK3-NEXT: [[TMP87:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP86:%.*]] = load i32, ptr [[A_CASTED17]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP87:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP18:%.*]] = icmp sgt i32 [[TMP87]], 20 // CHECK3-NEXT: br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE27:%.*]] // CHECK3: omp_if.then19: @@ -2318,7 +2318,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP86]], ptr [[B]], i32 [[TMP1]], ptr [[VLA]], ptr [[C]], i32 5, i32 [[TMP3]], ptr [[VLA1]], ptr [[D]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END28]] // CHECK3: omp_if.end28: -// CHECK3-NEXT: [[TMP136:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP136:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP137:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP137]]) // CHECK3-NEXT: ret i32 [[TMP136]] @@ -2335,12 +2335,12 @@ // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, 
i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., i32 [[TMP4]]) // CHECK3-NEXT: ret void // @@ -2366,45 +2366,45 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]] -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP12:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK3-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2464,64 +2464,64 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !27 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !27 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27 -// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !27 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !27 -// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !27 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27 -// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27 +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !28 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !28 +// CHECK3-NEXT: store ptr [[TMP8]], ptr 
[[DOTPRIVATES__ADDR_I]], align 4, !noalias !28 +// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !28 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !28 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !28 +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !28 +// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !28 +// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !28 // CHECK3-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]] -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !27 -// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !27 -// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !27 -// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !27 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !28 +// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !28 +// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !28 +// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !28 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK3-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !27 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !28 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK3-NEXT: store i32 3, ptr [[TMP19]], align 4, !noalias !27 +// CHECK3-NEXT: store i32 3, ptr [[TMP19]], align 4, !noalias !28 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP13]], ptr [[TMP20]], align 4, !noalias !27 +// CHECK3-NEXT: store ptr [[TMP13]], ptr [[TMP20]], align 4, !noalias !28 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP14]], ptr [[TMP21]], align 4, !noalias !27 +// CHECK3-NEXT: store ptr [[TMP14]], ptr [[TMP21]], align 4, !noalias !28 // CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK3-NEXT: store ptr [[TMP15]], ptr [[TMP22]], align 4, !noalias !27 +// CHECK3-NEXT: store ptr [[TMP15]], ptr [[TMP22]], align 4, !noalias !28 // CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 4, !noalias !27 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 4, !noalias !28 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP24]], align 4, !noalias !27 +// CHECK3-NEXT: store ptr null, ptr [[TMP24]], align 4, !noalias !28 // CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4, !noalias !27 +// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4, !noalias !28 // CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK3-NEXT: store i64 10, ptr [[TMP26]], align 8, !noalias !27 +// CHECK3-NEXT: store i64 10, ptr [[TMP26]], align 8, !noalias !28 // CHECK3-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB2]], i64 -1, i32 [[TMP18]], i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK3-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK3-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK3: omp_offload.failed.i: -// CHECK3-NEXT: [[TMP29:%.*]] = load i16, ptr [[TMP12]], align 2 -// CHECK3-NEXT: store i16 [[TMP29]], ptr [[AA_CASTED_I]], align 2, !noalias !27 -// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[AA_CASTED_I]], align 4, !noalias !27 -// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK3-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !27 -// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !27 -// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK3-NEXT: store i32 [[TMP33]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !27 -// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !27 +// CHECK3-NEXT: [[TMP29:%.*]] = load i16, ptr [[TMP12]], align 2, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: store i16 [[TMP29]], ptr [[AA_CASTED_I]], align 2, !noalias !28 +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[AA_CASTED_I]], align 4, !noalias !28, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !28 +// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !28, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: store i32 [[TMP33]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !28 +// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !28, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97(i32 [[TMP30]], i32 [[TMP32]], i32 [[TMP34]]) #[[ATTR4]] // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK3: .omp_outlined..1.exit: @@ -2534,9 +2534,9 @@ // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: 
[[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..2, i32 [[TMP1]]) // CHECK3-NEXT: ret void // @@ -2562,48 +2562,48 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[A1]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK3-NEXT: store i32 [[ADD3]], ptr [[A1]], align 4 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP29:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK3-NEXT: br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2619,9 +2619,9 @@ // CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..3, i32 [[TMP1]]) // CHECK3-NEXT: ret void // @@ -2647,50 +2647,50 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]] -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 
// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK3-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK3-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]] +// CHECK3-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP31]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK3-NEXT: br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2709,12 +2709,12 @@ // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..6, i32 [[TMP1]], i32 [[TMP3]]) // CHECK3-NEXT: ret void // @@ -2742,53 +2742,53 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]] -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK3-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK3-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] +// CHECK3-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i16, ptr 
[[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK3-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] +// CHECK3-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2821,16 +2821,16 @@ // CHECK3-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 9, ptr @.omp_outlined..9, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK3-NEXT: ret void // @@ -2868,11 +2868,11 @@ // CHECK3-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK3-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 0 @@ -2882,81 +2882,81 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]] -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 
// CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK3-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = fpext float [[TMP17]] to double // CHECK3-NEXT: [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK3-NEXT: [[CONV8:%.*]] = fptrunc double [[ADD7]] to float -// CHECK3-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK3-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK3-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV10:%.*]] = fpext float [[TMP18]] to double // CHECK3-NEXT: [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00 // CHECK3-NEXT: [[CONV12:%.*]] = fptrunc double [[ADD11]] to float -// CHECK3-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK3-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK3-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX13]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00 -// CHECK3-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP37]] // CHECK3-NEXT: [[TMP20:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK3-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP20]] // CHECK3-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX16]], i32 3 -// CHECK3-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group 
[[ACC_GRP36]] +// CHECK3-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00 -// CHECK3-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP37]] // CHECK3-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1 -// CHECK3-NEXT: store i64 [[ADD19]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store i64 [[ADD19]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK3-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV20:%.*]] = sext i8 [[TMP23]] to i32 // CHECK3-NEXT: [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1 // CHECK3-NEXT: [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8 -// CHECK3-NEXT: store i8 [[CONV22]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: store i8 [[CONV22]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK3-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP9]]) -// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK3-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -2974,27 +2974,27 @@ // CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef 
[[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK3-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK3-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK3-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP8]] // // @@ -3015,19 +3015,19 @@ // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -3105,9 +3105,9 @@ // CHECK3-NEXT: 
[[TMP40:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP40]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK3-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2 +// CHECK3-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX2]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP41]] to i32 -// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4 +// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP42]] // CHECK3-NEXT: [[TMP43:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP43]]) @@ -3137,19 +3137,19 @@ // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK3-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP2]], ptr [[N_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP4]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i8 [[TMP6]], ptr [[AAA_CASTED]], align 1 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 50 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -3185,19 +3185,19 @@ // CHECK3-NEXT: store ptr null, ptr [[TMP23]], align 4 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP26]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP27]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[SUB:%.*]] = sub i32 [[TMP28]], [[TMP29]] // CHECK3-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK3-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK3-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD5:%.*]] = add i32 [[TMP30]], 1 // CHECK3-NEXT: [[TMP31:%.*]] = zext i32 [[ADD5]] to i64 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -3231,7 +3231,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], i32 [[TMP7]], ptr [[B]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP43]] // // @@ -3251,13 +3251,13 @@ // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[A]], align 4 // CHECK3-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: @@ -3312,7 +3312,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i32 [[TMP1]], i32 [[TMP3]], ptr [[B]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: ret i32 [[TMP27]] // // @@ -3330,13 +3330,13 @@ // CHECK3-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] 
= load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..12, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]]) // CHECK3-NEXT: ret void // @@ -3365,68 +3365,68 @@ // CHECK3-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]] -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 
4, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]] -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK3-NEXT: [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK3-NEXT: store double [[ADD4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: store double [[ADD4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP40]] // CHECK3-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 -// CHECK3-NEXT: store double [[INC]], ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: store double [[INC]], ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP40]] // CHECK3-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 // CHECK3-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i32 [[TMP14]] // CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK3-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP40]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]] // CHECK3: omp.inner.for.end: // 
CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK3-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -3454,18 +3454,18 @@ // CHECK3-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP5]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i8 [[TMP7]], ptr [[AAA_CASTED]], align 1 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..15, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -3499,99 +3499,99 @@ // CHECK3-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]] // CHECK3-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK3-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK3-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK3-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK3: omp.precond.then: // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP10]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP6:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]] // CHECK3-NEXT: br i1 
[[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42:![0-9]+]] -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD7:%.*]] = add i32 [[TMP17]], 1 // CHECK3-NEXT: [[CMP8:%.*]] = icmp ult i32 [[TMP16]], [[ADD7]] // CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP42]] -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul i32 [[TMP19]], 1 // CHECK3-NEXT: [[ADD9:%.*]] = add i32 [[TMP18]], [[MUL]] -// CHECK3-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP42]] -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP43]] +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1 -// CHECK3-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP42]] -// CHECK3-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP43]] +// CHECK3-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP21]] to i32 // CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV12:%.*]] = trunc i32 [[ADD11]] to i16 -// CHECK3-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP42]] -// CHECK3-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], 
align 1, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP43]] +// CHECK3-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV13:%.*]] = sext i8 [[TMP22]] to i32 // CHECK3-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV13]], 1 // CHECK3-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 -// CHECK3-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP43]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK3-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP43]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]] +// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD17:%.*]] = add i32 [[TMP24]], 1 -// CHECK3-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP42]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP43]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK3-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[SUB18:%.*]] = sub i32 [[TMP30]], [[TMP31]] // CHECK3-NEXT: [[SUB19:%.*]] = sub i32 [[SUB18]], 1 // CHECK3-NEXT: [[ADD20:%.*]] = add i32 [[SUB19]], 1 @@ -3618,12 +3618,12 @@ // CHECK3-NEXT: store i32 
[[AA]], ptr [[AA_ADDR]], align 4 // CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..18, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -3654,57 +3654,57 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45:![0-9]+]] -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP45]] -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP45]] -// CHECK3-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP45]] +// CHECK3-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK3-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK3-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP45]] +// CHECK3-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP46]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK3-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP46]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK3-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK3-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -3764,30 +3764,30 @@ // CHECK5-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK5-NEXT: store i32 0, ptr [[A]], align 4 // CHECK5-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK5-NEXT: 
[[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10:![0-9]+]] // CHECK5-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK5-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK5-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK5-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4 // CHECK5-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64 // CHECK5-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]] // CHECK5-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8 // CHECK5-NEXT: store i64 [[TMP5]], ptr [[__VLA_EXPR1]], align 8 -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP8]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK5-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK5-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP9]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP10:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP13]], ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 -// CHECK5-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 8 +// CHECK5-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK5-NEXT: store i64 [[TMP10]], ptr [[TMP15]], align 8 // CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -3809,18 +3809,18 @@ // CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP27:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK5-NEXT: [[TMP27:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP27]], ptr [[TMP26]], align 4 // CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK5-NEXT: [[TMP29:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_]], align 4 +// CHECK5-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP29]], ptr [[TMP28]], align 4 // CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK5-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK5-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP31]], ptr [[TMP30]], align 4 // CHECK5-NEXT: [[TMP32:%.*]] = call ptr @__kmpc_omp_target_task_alloc(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i64 120, i64 12, ptr @.omp_task_entry., i64 -1) // CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP32]], i32 0, i32 0 // CHECK5-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP33]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP34]], align 8 +// CHECK5-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP34]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP35]], ptr align 4 [[AGG_CAPTURED]], i64 12, i1 false) // CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP32]], i32 0, i32 1 // CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP36]], i32 0, i32 0 @@ -3830,16 +3830,16 @@ // CHECK5-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP36]], i32 0, i32 2 // CHECK5-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP39]], ptr align 8 @.offload_sizes, i64 24, i1 false) // CHECK5-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP36]], i32 0, i32 3 -// CHECK5-NEXT: [[TMP41:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK5-NEXT: [[TMP41:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP41]], ptr [[TMP40]], align 8 // CHECK5-NEXT: [[TMP42:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP0]], ptr [[TMP32]]) -// CHECK5-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP43]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP44:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK5-NEXT: [[TMP44:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l102(i64 [[TMP44]]) #[[ATTR4:[0-9]+]] -// CHECK5-NEXT: [[TMP45:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK5-NEXT: [[TMP45:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP45]], ptr [[AA_CASTED4]], align 2 -// CHECK5-NEXT: [[TMP46:%.*]] = load i64, ptr [[AA_CASTED4]], align 8 +// CHECK5-NEXT: [[TMP46:%.*]] = load i64, ptr [[AA_CASTED4]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP47:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 // CHECK5-NEXT: store i64 [[TMP46]], ptr [[TMP47]], align 8 // CHECK5-NEXT: [[TMP48:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 @@ -3874,13 +3874,13 @@ // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111(i64 [[TMP46]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK5: omp_offload.cont: -// CHECK5-NEXT: [[TMP63:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: 
[[TMP63:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP63]], ptr [[A_CASTED8]], align 4 -// CHECK5-NEXT: [[TMP64:%.*]] = load i64, ptr [[A_CASTED8]], align 8 -// CHECK5-NEXT: [[TMP65:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK5-NEXT: [[TMP64:%.*]] = load i64, ptr [[A_CASTED8]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP65:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP65]], ptr [[AA_CASTED9]], align 2 -// CHECK5-NEXT: [[TMP66:%.*]] = load i64, ptr [[AA_CASTED9]], align 8 -// CHECK5-NEXT: [[TMP67:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP66:%.*]] = load i64, ptr [[AA_CASTED9]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP67:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP67]], 10 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK5: omp_if.then: @@ -3929,10 +3929,10 @@ // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i64 [[TMP64]], i64 [[TMP66]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_IF_END]] // CHECK5: omp_if.end: -// CHECK5-NEXT: [[TMP87:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP87:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP87]], ptr [[A_CASTED17]], align 4 -// CHECK5-NEXT: [[TMP88:%.*]] = load i64, ptr [[A_CASTED17]], align 8 -// CHECK5-NEXT: [[TMP89:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP88:%.*]] = load i64, ptr [[A_CASTED17]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP89:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP18:%.*]] = icmp sgt i32 [[TMP89]], 20 // CHECK5-NEXT: br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE27:%.*]] // CHECK5: omp_if.then19: @@ -4032,7 +4032,7 @@ // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP88]], ptr [[B]], i64 [[TMP2]], ptr [[VLA]], ptr [[C]], i64 5, i64 [[TMP5]], ptr [[VLA1]], ptr [[D]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_IF_END28]] // CHECK5: omp_if.end28: -// CHECK5-NEXT: [[TMP136:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP136:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP137:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK5-NEXT: call void @llvm.stackrestore(ptr [[TMP137]]) // CHECK5-NEXT: ret i32 [[TMP136]] @@ -4049,12 +4049,12 @@ // CHECK5-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK5-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK5-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK5-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK5-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], 
align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., i64 [[TMP4]]) // CHECK5-NEXT: ret void // @@ -4080,45 +4080,45 @@ // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]] -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK5-NEXT: store i32 
[[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK5-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -4178,64 +4178,64 @@ // CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK5-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) -// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) -// CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26 -// CHECK5-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !26 -// CHECK5-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26 -// CHECK5-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26 -// CHECK5-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !26 -// CHECK5-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !26 -// CHECK5-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !26 -// CHECK5-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !26 -// CHECK5-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !26 +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) +// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]]) +// CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !27 
+// CHECK5-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !27 +// CHECK5-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !27 +// CHECK5-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !27 +// CHECK5-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !27 +// CHECK5-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !27 +// CHECK5-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !27 +// CHECK5-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !27 +// CHECK5-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !27 // CHECK5-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]] -// CHECK5-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !26 -// CHECK5-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !26 -// CHECK5-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !26 -// CHECK5-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !26 +// CHECK5-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !27 +// CHECK5-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !27 +// CHECK5-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !27 +// CHECK5-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !27 // CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 // CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 -// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK5-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !26 +// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !27 // CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK5-NEXT: store i32 3, ptr [[TMP19]], align 4, !noalias !26 +// CHECK5-NEXT: store i32 3, ptr [[TMP19]], align 4, !noalias !27 // CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK5-NEXT: store ptr [[TMP13]], ptr [[TMP20]], align 8, !noalias !26 +// CHECK5-NEXT: store ptr [[TMP13]], ptr [[TMP20]], align 8, !noalias !27 // CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK5-NEXT: store ptr [[TMP14]], ptr [[TMP21]], align 8, !noalias !26 +// CHECK5-NEXT: store ptr [[TMP14]], ptr [[TMP21]], align 8, !noalias !27 // CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK5-NEXT: store ptr [[TMP15]], ptr [[TMP22]], align 8, !noalias !26 +// CHECK5-NEXT: store ptr [[TMP15]], ptr [[TMP22]], align 8, !noalias !27 // CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK5-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 8, !noalias !26 +// CHECK5-NEXT: store ptr 
@.offload_maptypes, ptr [[TMP23]], align 8, !noalias !27 // CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK5-NEXT: store ptr null, ptr [[TMP24]], align 8, !noalias !26 +// CHECK5-NEXT: store ptr null, ptr [[TMP24]], align 8, !noalias !27 // CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK5-NEXT: store ptr null, ptr [[TMP25]], align 8, !noalias !26 +// CHECK5-NEXT: store ptr null, ptr [[TMP25]], align 8, !noalias !27 // CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK5-NEXT: store i64 10, ptr [[TMP26]], align 8, !noalias !26 +// CHECK5-NEXT: store i64 10, ptr [[TMP26]], align 8, !noalias !27 // CHECK5-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB2]], i64 -1, i32 [[TMP18]], i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK5-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK5-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK5: omp_offload.failed.i: -// CHECK5-NEXT: [[TMP29:%.*]] = load i16, ptr [[TMP12]], align 2 -// CHECK5-NEXT: store i16 [[TMP29]], ptr [[AA_CASTED_I]], align 2, !noalias !26 -// CHECK5-NEXT: [[TMP30:%.*]] = load i64, ptr [[AA_CASTED_I]], align 8, !noalias !26 -// CHECK5-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK5-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !26 -// CHECK5-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 8, !noalias !26 -// CHECK5-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK5-NEXT: store i32 [[TMP33]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !26 -// CHECK5-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 8, !noalias !26 +// CHECK5-NEXT: [[TMP29:%.*]] = load i16, ptr [[TMP12]], align 2, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: store i16 [[TMP29]], ptr [[AA_CASTED_I]], align 2, !noalias !27 +// CHECK5-NEXT: [[TMP30:%.*]] = load i64, ptr [[AA_CASTED_I]], align 8, !noalias !27, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !27 +// CHECK5-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 8, !noalias !27, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: store i32 [[TMP33]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !27 +// CHECK5-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 8, !noalias !27, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97(i64 [[TMP30]], i64 [[TMP32]], i64 [[TMP34]]) #[[ATTR4]] // CHECK5-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK5: .omp_outlined..1.exit: @@ -4248,9 +4248,9 @@ // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP0]], ptr 
[[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..2, i64 [[TMP1]]) // CHECK5-NEXT: ret void // @@ -4276,48 +4276,48 @@ // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[A1]], align 4, !nontemporal !27 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4, !nontemporal !27 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[A1]], align 4, !nontemporal !28 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4, !nontemporal !28, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[A1]], align 4, !nontemporal !27 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[A1]], align 4, !nontemporal !28 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = 
load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK5-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK5-NEXT: br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -4333,9 +4333,9 @@ // CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK5-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK5-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..3, i64 [[TMP1]]) // CHECK5-NEXT: ret void // @@ -4361,50 +4361,50 @@ // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]] -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group 
[[ACC_GRP31:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK5-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK5-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK5-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP30]] +// CHECK5-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP31]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK5-NEXT: br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -4423,12 +4423,12 @@ // CHECK5-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK5-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK5-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // 
CHECK5-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..6, i64 [[TMP1]], i64 [[TMP3]]) // CHECK5-NEXT: ret void // @@ -4456,53 +4456,53 @@ // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]] -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK5-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, 
!llvm.access.group [[ACC_GRP33]] -// CHECK5-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] +// CHECK5-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32 // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK5-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK5-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] +// CHECK5-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK5-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK5-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -4535,16 +4535,16 @@ // CHECK5-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK5-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 9, ptr @.omp_outlined..9, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK5-NEXT: ret void // @@ -4582,11 +4582,11 @@ // CHECK5-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK5-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK5-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK5-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 0 @@ -4596,81 +4596,81 @@ // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]] -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK5-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 
// CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK5-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK5-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV:%.*]] = fpext float [[TMP17]] to double // CHECK5-NEXT: [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK5-NEXT: [[CONV8:%.*]] = fptrunc double [[ADD7]] to float -// CHECK5-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK5-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK5-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV10:%.*]] = fpext float [[TMP18]] to double // CHECK5-NEXT: [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00 // CHECK5-NEXT: [[CONV12:%.*]] = fptrunc double [[ADD11]] to float -// CHECK5-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK5-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK5-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX13]], i64 0, i64 2 -// CHECK5-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00 -// CHECK5-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP37]] // CHECK5-NEXT: [[TMP20:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK5-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP20]] // CHECK5-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX16]], i64 3 -// CHECK5-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group 
[[ACC_GRP36]] +// CHECK5-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00 -// CHECK5-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP37]] // CHECK5-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1 -// CHECK5-NEXT: store i64 [[ADD19]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: store i64 [[ADD19]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP37]] // CHECK5-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK5-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV20:%.*]] = sext i8 [[TMP23]] to i32 // CHECK5-NEXT: [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1 // CHECK5-NEXT: [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8 -// CHECK5-NEXT: store i8 [[CONV22]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: store i8 [[CONV22]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP37]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK5-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP9]]) -// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK5-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -4688,27 +4688,27 @@ // CHECK5-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK5-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK5-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, 
!noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK5-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK5-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK5-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK5-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: ret i32 [[TMP8]] // // @@ -4731,29 +4731,29 @@ // CHECK5-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK5-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK5-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK5-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK5-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK5-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 // CHECK5-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60 // CHECK5-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 // CHECK5-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[B]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP6]], ptr [[B_CASTED]], align 4 -// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[B_CASTED]], 
align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1 // CHECK5-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK5-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK5-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK5-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK5-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP10]] to i1 // CHECK5-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK5: omp_if.then: @@ -4836,9 +4836,9 @@ // CHECK5-NEXT: [[TMP46:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP46]] // CHECK5-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK5-NEXT: [[TMP47:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 +// CHECK5-NEXT: [[TMP47:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV:%.*]] = sext i16 [[TMP47]] to i32 -// CHECK5-NEXT: [[TMP48:%.*]] = load i32, ptr [[B]], align 4 +// CHECK5-NEXT: [[TMP48:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV]], [[TMP48]] // CHECK5-NEXT: [[TMP49:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK5-NEXT: call void @llvm.stackrestore(ptr [[TMP49]]) @@ -4868,19 +4868,19 @@ // CHECK5-NEXT: store i32 0, ptr [[A]], align 4 // CHECK5-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK5-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP2]], ptr [[N_CASTED]], align 4 -// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP4]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i8 [[TMP6]], ptr [[AAA_CASTED]], align 1 -// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef 
[[NOUNDEF10]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], 50 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK5: omp_if.then: @@ -4916,19 +4916,19 @@ // CHECK5-NEXT: store ptr null, ptr [[TMP23]], align 8 // CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP26]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP27]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK5-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK5-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK5-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[SUB:%.*]] = sub i32 [[TMP28]], [[TMP29]] // CHECK5-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK5-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK5-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK5-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD5:%.*]] = add i32 [[TMP30]], 1 // CHECK5-NEXT: [[TMP31:%.*]] = zext i32 [[ADD5]] to i64 // CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -4962,7 +4962,7 @@ // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], i64 [[TMP7]], ptr [[B]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_IF_END]] // CHECK5: omp_if.end: -// CHECK5-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: ret i32 [[TMP43]] // // @@ -4982,13 +4982,13 @@ // CHECK5-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK5-NEXT: store i32 0, ptr [[A]], align 4 // CHECK5-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr 
[[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK5: omp_if.then: @@ -5043,7 +5043,7 @@ // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i64 [[TMP1]], i64 [[TMP3]], ptr [[B]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_IF_END]] // CHECK5: omp_if.end: -// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: ret i32 [[TMP27]] // // @@ -5064,18 +5064,18 @@ // CHECK5-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK5-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 -// CHECK5-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK5-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1 // CHECK5-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK5-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK5-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 6, ptr @.omp_outlined..12, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]], i64 [[TMP7]]) // CHECK5-NEXT: ret void // @@ -5106,88 +5106,88 @@ // CHECK5-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK5-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK5-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK5-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK5: omp_if.then: // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39:![0-9]+]] -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP3:%.*]] = icmp sle i32 
[[TMP10]], [[TMP11]] // CHECK5-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP39]] -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP13]] to double // CHECK5-NEXT: [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK5-NEXT: store double [[ADD4]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP39]] +// CHECK5-NEXT: store double [[ADD4]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP40]] // CHECK5-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP14:%.*]] = load double, ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP39]] +// CHECK5-NEXT: [[TMP14:%.*]] = load double, ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00 -// CHECK5-NEXT: store double [[INC]], ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP39]] +// CHECK5-NEXT: store double [[INC]], ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP40]] // CHECK5-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 // CHECK5-NEXT: [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i64 [[TMP15]] // CHECK5-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK5-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP39]] +// CHECK5-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP40]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]] +// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK5-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP39]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_IF_END:%.*]] // CHECK5: omp_if.else: // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK5: omp.inner.for.cond9: -// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] // CHECK5-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END25:%.*]] // CHECK5: omp.inner.for.body11: -// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP19]], 1 // CHECK5-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]] // CHECK5-NEXT: store i32 [[ADD13]], ptr [[I]], align 4 -// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV14:%.*]] = sitofp i32 [[TMP20]] to double // CHECK5-NEXT: [[ADD15:%.*]] = fadd double [[CONV14]], 1.500000e+00 // CHECK5-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 // CHECK5-NEXT: store double [[ADD15]], ptr [[A16]], align 8 // CHECK5-NEXT: [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP21:%.*]] = load double, ptr [[A17]], align 8 +// CHECK5-NEXT: [[TMP21:%.*]] = load double, ptr [[A17]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[INC18:%.*]] = fadd double [[TMP21]], 1.000000e+00 // CHECK5-NEXT: store double [[INC18]], ptr [[A17]], align 8 // CHECK5-NEXT: [[CONV19:%.*]] = fptosi double [[INC18]] to i16 @@ -5199,17 +5199,17 @@ // CHECK5: omp.body.continue22: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC23:%.*]] // CHECK5: omp.inner.for.inc23: -// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK5-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP42:![0-9]+]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP43:![0-9]+]] // CHECK5: omp.inner.for.end25: // CHECK5-NEXT: br label [[OMP_IF_END]] // CHECK5: omp_if.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 // CHECK5-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -5237,18 +5237,18 @@ // CHECK5-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP3]], ptr 
[[N_CASTED]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK5-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP5]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP6:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK5-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK5-NEXT: [[TMP6:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i8 [[TMP7]], ptr [[AAA_CASTED]], align 1 -// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK5-NEXT: [[TMP8:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..15, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], ptr [[TMP0]]) // CHECK5-NEXT: ret void // @@ -5282,99 +5282,99 @@ // CHECK5-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]] // CHECK5-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK5-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK5-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK5-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK5: omp.precond.then: // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP8]], ptr 
[[DOTOMP_UB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP10]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP6:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]] // CHECK5-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44:![0-9]+]] -// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD7:%.*]] = add i32 [[TMP17]], 1 // CHECK5-NEXT: [[CMP8:%.*]] = icmp ult i32 [[TMP16]], [[ADD7]] // CHECK5-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul i32 [[TMP19]], 1 // CHECK5-NEXT: [[ADD9:%.*]] = add i32 [[TMP18]], [[MUL]] -// CHECK5-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: store i32 
[[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1 -// CHECK5-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK5-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK5-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV:%.*]] = sext i16 [[TMP21]] to i32 // CHECK5-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV]], 1 // CHECK5-NEXT: [[CONV12:%.*]] = trunc i32 [[ADD11]] to i16 -// CHECK5-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP44]] -// CHECK5-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP45]] +// CHECK5-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV13:%.*]] = sext i8 [[TMP22]] to i32 // CHECK5-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV13]], 1 // CHECK5-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 -// CHECK5-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP45]] // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK5-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP45]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] +// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD17:%.*]] = add i32 [[TMP24]], 1 -// CHECK5-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP44]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK5-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK5-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP27:%.*]] = load 
i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK5-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: -// CHECK5-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK5-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK5-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[SUB18:%.*]] = sub i32 [[TMP30]], [[TMP31]] // CHECK5-NEXT: [[SUB19:%.*]] = sub i32 [[SUB18]], 1 // CHECK5-NEXT: [[ADD20:%.*]] = add i32 [[SUB19]], 1 @@ -5401,12 +5401,12 @@ // CHECK5-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK5-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK5-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK5-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK5-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK5-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..18, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK5-NEXT: ret void // @@ -5437,57 +5437,57 @@ // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47:![0-9]+]] -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP47]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP48:![0-9]+]], !noundef [[NOUNDEF10]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP48]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP48]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP47]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP48]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK5-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK5-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP47]] +// CHECK5-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK5-NEXT: [[TMP10:%.*]] = 
load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP48]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK5-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK5-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP47]] +// CHECK5-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP48]] // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP47]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP48]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK5-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP47]] +// CHECK5-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP48]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP48]], !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK5-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP47]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP49:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF10]] // CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK5-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: @@ -5547,28 +5547,28 @@ // CHECK7-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK7-NEXT: store i32 0, ptr [[A]], align 4 // CHECK7-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11:![0-9]+]] // CHECK7-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK7-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK7-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 // CHECK7-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]] // CHECK7-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8 // CHECK7-NEXT: store i32 [[TMP3]], ptr [[__VLA_EXPR1]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK7-NEXT: 
[[TMP6:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK7-NEXT: [[TMP7:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP7]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP9]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP11]], ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4 +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED3]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: store i32 [[TMP8]], ptr [[TMP13]], align 4 // CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -5590,18 +5590,18 @@ // CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP25:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK7-NEXT: [[TMP25:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP25]], ptr [[TMP24]], align 4 // CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP27]], ptr [[TMP26]], align 4 // CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK7-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK7-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP29]], ptr [[TMP28]], align 4 // CHECK7-NEXT: [[TMP30:%.*]] = call ptr @__kmpc_omp_target_task_alloc(ptr @[[GLOB2]], i32 [[TMP0]], i32 1, i32 72, i32 12, ptr @.omp_task_entry., i64 -1) // CHECK7-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP30]], i32 0, i32 0 // CHECK7-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP31]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP33:%.*]] = load ptr, ptr [[TMP32]], align 4 +// CHECK7-NEXT: [[TMP33:%.*]] = load ptr, ptr [[TMP32]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call 
void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP33]], ptr align 4 [[AGG_CAPTURED]], i32 12, i1 false) // CHECK7-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP30]], i32 0, i32 1 // CHECK7-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP34]], i32 0, i32 0 @@ -5611,16 +5611,16 @@ // CHECK7-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP34]], i32 0, i32 2 // CHECK7-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP37]], ptr align 4 [[TMP23]], i32 12, i1 false) // CHECK7-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP34]], i32 0, i32 3 -// CHECK7-NEXT: [[TMP39:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK7-NEXT: [[TMP39:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP39]], ptr [[TMP38]], align 4 // CHECK7-NEXT: [[TMP40:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB2]], i32 [[TMP0]], ptr [[TMP30]]) -// CHECK7-NEXT: [[TMP41:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP41:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP41]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP42:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK7-NEXT: [[TMP42:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l102(i32 [[TMP42]]) #[[ATTR4:[0-9]+]] -// CHECK7-NEXT: [[TMP43:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK7-NEXT: [[TMP43:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP43]], ptr [[AA_CASTED4]], align 2 -// CHECK7-NEXT: [[TMP44:%.*]] = load i32, ptr [[AA_CASTED4]], align 4 +// CHECK7-NEXT: [[TMP44:%.*]] = load i32, ptr [[AA_CASTED4]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP45:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 // CHECK7-NEXT: store i32 [[TMP44]], ptr [[TMP45]], align 4 // CHECK7-NEXT: [[TMP46:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 @@ -5655,13 +5655,13 @@ // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l111(i32 [[TMP44]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK7: omp_offload.cont: -// CHECK7-NEXT: [[TMP61:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP61:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP61]], ptr [[A_CASTED8]], align 4 -// CHECK7-NEXT: [[TMP62:%.*]] = load i32, ptr [[A_CASTED8]], align 4 -// CHECK7-NEXT: [[TMP63:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK7-NEXT: [[TMP62:%.*]] = load i32, ptr [[A_CASTED8]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP63:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP63]], ptr [[AA_CASTED9]], align 2 -// CHECK7-NEXT: [[TMP64:%.*]] = load i32, ptr [[AA_CASTED9]], align 4 -// CHECK7-NEXT: [[TMP65:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP64:%.*]] = load i32, ptr [[AA_CASTED9]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP65:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP65]], 10 // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: @@ -5710,10 +5710,10 @@ // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l118(i32 [[TMP62]], 
i32 [[TMP64]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: -// CHECK7-NEXT: [[TMP85:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP85:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP85]], ptr [[A_CASTED17]], align 4 -// CHECK7-NEXT: [[TMP86:%.*]] = load i32, ptr [[A_CASTED17]], align 4 -// CHECK7-NEXT: [[TMP87:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP86:%.*]] = load i32, ptr [[A_CASTED17]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP87:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP18:%.*]] = icmp sgt i32 [[TMP87]], 20 // CHECK7-NEXT: br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE27:%.*]] // CHECK7: omp_if.then19: @@ -5815,7 +5815,7 @@ // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP86]], ptr [[B]], i32 [[TMP1]], ptr [[VLA]], ptr [[C]], i32 5, i32 [[TMP3]], ptr [[VLA1]], ptr [[D]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END28]] // CHECK7: omp_if.end28: -// CHECK7-NEXT: [[TMP136:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP136:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP137:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK7-NEXT: call void @llvm.stackrestore(ptr [[TMP137]]) // CHECK7-NEXT: ret i32 [[TMP136]] @@ -5832,12 +5832,12 @@ // CHECK7-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK7-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK7-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., i32 [[TMP4]]) // CHECK7-NEXT: ret void // @@ -5863,45 +5863,45 @@ // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]] -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// 
CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK7-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -5961,64 +5961,64 @@ // CHECK7-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4 // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK7-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) -// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]]) -// CHECK7-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !27 -// CHECK7-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !27 -// CHECK7-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27 -// CHECK7-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27 -// CHECK7-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !27 -// CHECK7-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !27 -// CHECK7-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !27 -// CHECK7-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !27 -// CHECK7-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !27 +// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) +// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) +// CHECK7-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !28 +// CHECK7-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 4, !noalias !28 +// CHECK7-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], 
align 4, !noalias !28 +// CHECK7-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !28 +// CHECK7-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 4, !noalias !28 +// CHECK7-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !28 +// CHECK7-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 4, !noalias !28 +// CHECK7-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !28 +// CHECK7-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 4, !noalias !28 // CHECK7-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]] -// CHECK7-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !27 -// CHECK7-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !27 -// CHECK7-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !27 -// CHECK7-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !27 +// CHECK7-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !28 +// CHECK7-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !28 +// CHECK7-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !28 +// CHECK7-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !28 // CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 // CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 -// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK7-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !27 +// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: store i32 1, ptr [[KERNEL_ARGS_I]], align 4, !noalias !28 // CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 1 -// CHECK7-NEXT: store i32 3, ptr [[TMP19]], align 4, !noalias !27 +// CHECK7-NEXT: store i32 3, ptr [[TMP19]], align 4, !noalias !28 // CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 2 -// CHECK7-NEXT: store ptr [[TMP13]], ptr [[TMP20]], align 4, !noalias !27 +// CHECK7-NEXT: store ptr [[TMP13]], ptr [[TMP20]], align 4, !noalias !28 // CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 3 -// CHECK7-NEXT: store ptr [[TMP14]], ptr [[TMP21]], align 4, !noalias !27 +// CHECK7-NEXT: store ptr [[TMP14]], ptr [[TMP21]], align 4, !noalias !28 // CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 4 -// CHECK7-NEXT: store ptr [[TMP15]], ptr [[TMP22]], align 4, !noalias !27 +// CHECK7-NEXT: store ptr [[TMP15]], ptr [[TMP22]], align 4, !noalias !28 // CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 5 -// CHECK7-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 4, !noalias !27 +// CHECK7-NEXT: store ptr @.offload_maptypes, ptr [[TMP23]], align 4, !noalias !28 // CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS_I]], i32 0, i32 6 -// CHECK7-NEXT: store ptr null, ptr [[TMP24]], align 4, !noalias !27 +// CHECK7-NEXT: store ptr null, ptr [[TMP24]], align 4, !noalias !28 // CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 7 -// CHECK7-NEXT: store ptr null, ptr [[TMP25]], align 4, !noalias !27 +// CHECK7-NEXT: store ptr null, ptr [[TMP25]], align 4, !noalias !28 // CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS_I]], i32 0, i32 8 -// CHECK7-NEXT: store i64 10, ptr [[TMP26]], align 8, !noalias !27 +// CHECK7-NEXT: store i64 10, ptr [[TMP26]], align 8, !noalias !28 // CHECK7-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_kernel_nowait(ptr @[[GLOB2]], i64 -1, i32 [[TMP18]], i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97.region_id, ptr [[KERNEL_ARGS_I]], i32 0, ptr null, i32 0, ptr null) // CHECK7-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK7-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK7: omp_offload.failed.i: -// CHECK7-NEXT: [[TMP29:%.*]] = load i16, ptr [[TMP12]], align 2 -// CHECK7-NEXT: store i16 [[TMP29]], ptr [[AA_CASTED_I]], align 2, !noalias !27 -// CHECK7-NEXT: [[TMP30:%.*]] = load i32, ptr [[AA_CASTED_I]], align 4, !noalias !27 -// CHECK7-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP16]], align 4 -// CHECK7-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !27 -// CHECK7-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !27 -// CHECK7-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP17]], align 4 -// CHECK7-NEXT: store i32 [[TMP33]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !27 -// CHECK7-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !27 +// CHECK7-NEXT: [[TMP29:%.*]] = load i16, ptr [[TMP12]], align 2, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: store i16 [[TMP29]], ptr [[AA_CASTED_I]], align 2, !noalias !28 +// CHECK7-NEXT: [[TMP30:%.*]] = load i32, ptr [[AA_CASTED_I]], align 4, !noalias !28, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !28 +// CHECK7-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED_I]], align 4, !noalias !28, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP17]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: store i32 [[TMP33]], ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !28 +// CHECK7-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED4_I]], align 4, !noalias !28, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97(i32 [[TMP30]], i32 [[TMP32]], i32 [[TMP34]]) #[[ATTR4]] // CHECK7-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK7: .omp_outlined..1.exit: @@ -6031,9 +6031,9 @@ // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], 
align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..2, i32 [[TMP1]]) // CHECK7-NEXT: ret void // @@ -6059,48 +6059,48 @@ // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK7-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[A1]], align 4, !nontemporal !28 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4, !nontemporal !28 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[A1]], align 4, !nontemporal !29 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4, !nontemporal !29, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK7-NEXT: store i32 [[ADD3]], ptr [[A1]], align 4, !nontemporal !28 +// CHECK7-NEXT: store i32 [[ADD3]], ptr [[A1]], align 4, !nontemporal !29 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK7-NEXT: store i32 [[ADD4]], 
ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK7-NEXT: br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -6116,9 +6116,9 @@ // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK7-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..3, i32 [[TMP1]]) // CHECK7-NEXT: ret void // @@ -6144,50 +6144,50 @@ // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]] -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]], !noundef 
[[NOUNDEF11]] // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK7-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP31]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP32]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK7-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK7-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP31]] +// CHECK7-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP32]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK7-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK7-NEXT: br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -6206,12 +6206,12 @@ // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load 
i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..6, i32 [[TMP1]], i32 [[TMP3]]) // CHECK7-NEXT: ret void // @@ -6239,53 +6239,53 @@ // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]] -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK7-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr 
[[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32 // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK7-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK7-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]] +// CHECK7-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP35]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK7-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK7-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -6318,16 +6318,16 @@ // CHECK7-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK7-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 9, ptr @.omp_outlined..9, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK7-NEXT: ret void // @@ -6365,11 +6365,11 @@ // CHECK7-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK7-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK7-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK7-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 0 @@ -6379,81 +6379,81 @@ // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]] -// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP38]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK7-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 
// CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP37]] -// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP38]] +// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP38]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK7-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP38]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK7-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP38]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV:%.*]] = fpext float [[TMP17]] to double // CHECK7-NEXT: [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK7-NEXT: [[CONV8:%.*]] = fptrunc double [[ADD7]] to float -// CHECK7-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP38]] // CHECK7-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK7-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP38]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV10:%.*]] = fpext float [[TMP18]] to double // CHECK7-NEXT: [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00 // CHECK7-NEXT: [[CONV12:%.*]] = fptrunc double [[ADD11]] to float -// CHECK7-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP38]] // CHECK7-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK7-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX13]], i32 0, i32 2 -// CHECK7-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP38]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00 -// CHECK7-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP38]] // CHECK7-NEXT: [[TMP20:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK7-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP20]] // CHECK7-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX16]], i32 3 -// CHECK7-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group 
[[ACC_GRP37]] +// CHECK7-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP38]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00 -// CHECK7-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP38]] // CHECK7-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP38]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1 -// CHECK7-NEXT: store i64 [[ADD19]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: store i64 [[ADD19]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP38]] // CHECK7-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK7-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP38]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV20:%.*]] = sext i8 [[TMP23]] to i32 // CHECK7-NEXT: [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1 // CHECK7-NEXT: [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8 -// CHECK7-NEXT: store i8 [[CONV22]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: store i8 [[CONV22]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP38]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK7-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP38]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP9]]) -// CHECK7-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK7-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -6471,27 +6471,27 @@ // CHECK7-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK7-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK7-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef 
[[NOUNDEF11]] // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK7-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK7-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK7-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK7-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: ret i32 [[TMP8]] // // @@ -6514,28 +6514,28 @@ // CHECK7-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK7-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK7-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK7-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK7-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK7-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 // CHECK7-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60 // CHECK7-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 // CHECK7-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP5]], ptr [[B_CASTED]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[B_CASTED]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK7-NEXT: [[TMP6:%.*]] = 
load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 // CHECK7-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK7-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK7-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: @@ -6619,9 +6619,9 @@ // CHECK7-NEXT: [[TMP46:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP46]] // CHECK7-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK7-NEXT: [[TMP47:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2 +// CHECK7-NEXT: [[TMP47:%.*]] = load i16, ptr [[ARRAYIDX4]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV:%.*]] = sext i16 [[TMP47]] to i32 -// CHECK7-NEXT: [[TMP48:%.*]] = load i32, ptr [[B]], align 4 +// CHECK7-NEXT: [[TMP48:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV]], [[TMP48]] // CHECK7-NEXT: [[TMP49:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK7-NEXT: call void @llvm.stackrestore(ptr [[TMP49]]) @@ -6651,19 +6651,19 @@ // CHECK7-NEXT: store i32 0, ptr [[A]], align 4 // CHECK7-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK7-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP2]], ptr [[N_CASTED]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP4]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i8, ptr [[AAA]], align 1 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i8, ptr [[AAA]], align 1, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i8 [[TMP6]], ptr [[AAA_CASTED]], align 1 -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: 
[[CMP:%.*]] = icmp sgt i32 [[TMP8]], 50 // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: @@ -6699,19 +6699,19 @@ // CHECK7-NEXT: store ptr null, ptr [[TMP23]], align 4 // CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP26:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP26]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP27]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK7-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK7-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK7-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[SUB:%.*]] = sub i32 [[TMP28]], [[TMP29]] // CHECK7-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK7-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK7-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK7-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK7-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK7-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD5:%.*]] = add i32 [[TMP30]], 1 // CHECK7-NEXT: [[TMP31:%.*]] = zext i32 [[ADD5]] to i64 // CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -6745,7 +6745,7 @@ // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], i32 [[TMP7]], ptr [[B]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: -// CHECK7-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP43:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: ret i32 [[TMP43]] // // @@ -6765,13 +6765,13 @@ // CHECK7-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK7-NEXT: store i32 0, ptr [[A]], align 4 // CHECK7-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // CHECK7-NEXT: br i1 
[[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: @@ -6826,7 +6826,7 @@ // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180(i32 [[TMP1]], i32 [[TMP3]], ptr [[B]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: -// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4 +// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: ret i32 [[TMP27]] // // @@ -6847,18 +6847,18 @@ // CHECK7-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1 // CHECK7-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK7-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 6, ptr @.omp_outlined..12, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]], i32 [[TMP7]]) // CHECK7-NEXT: ret void // @@ -6889,88 +6889,88 @@ // CHECK7-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK7-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK7-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK7-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40:![0-9]+]] -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP41]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP3:%.*]] = icmp sle i32 
[[TMP10]], [[TMP11]] // CHECK7-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP40]] -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP41]] +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP41]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP13]] to double // CHECK7-NEXT: [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK7-NEXT: store double [[ADD4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK7-NEXT: store double [[ADD4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP41]] // CHECK7-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP14:%.*]] = load double, ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK7-NEXT: [[TMP14:%.*]] = load double, ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP41]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00 -// CHECK7-NEXT: store double [[INC]], ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK7-NEXT: store double [[INC]], ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP41]] // CHECK7-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 // CHECK7-NEXT: [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i32 [[TMP15]] // CHECK7-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK7-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP40]] +// CHECK7-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP41]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]] +// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP40]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP41]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] // CHECK7: omp_if.else: // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK7: omp.inner.for.cond9: -// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] // CHECK7-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END25:%.*]] // CHECK7: omp.inner.for.body11: -// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP19]], 1 // CHECK7-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]] // CHECK7-NEXT: store i32 [[ADD13]], ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV14:%.*]] = sitofp i32 [[TMP20]] to double // CHECK7-NEXT: [[ADD15:%.*]] = fadd double [[CONV14]], 1.500000e+00 // CHECK7-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 // CHECK7-NEXT: store double [[ADD15]], ptr [[A16]], align 4 // CHECK7-NEXT: [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP21:%.*]] = load double, ptr [[A17]], align 4 +// CHECK7-NEXT: [[TMP21:%.*]] = load double, ptr [[A17]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[INC18:%.*]] = fadd double [[TMP21]], 1.000000e+00 // CHECK7-NEXT: store double [[INC18]], ptr [[A17]], align 4 // CHECK7-NEXT: [[CONV19:%.*]] = fptosi double [[INC18]] to i16 @@ -6982,17 +6982,17 @@ // CHECK7: omp.body.continue22: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC23:%.*]] // CHECK7: omp.inner.for.inc23: -// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK7-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP43:![0-9]+]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP44:![0-9]+]] // CHECK7: omp.inner.for.end25: // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 // CHECK7-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -7020,18 +7020,18 @@ // CHECK7-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP3]], ptr 
[[N_CASTED]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP5]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i8 [[TMP7]], ptr [[AAA_CASTED]], align 1 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..15, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], ptr [[TMP0]]) // CHECK7-NEXT: ret void // @@ -7065,99 +7065,99 @@ // CHECK7-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]] // CHECK7-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK7-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK7-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK7-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK7: omp.precond.then: // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP8]], ptr 
[[DOTOMP_UB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP10]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP6:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]] // CHECK7-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45:![0-9]+]] -// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD7:%.*]] = add i32 [[TMP17]], 1 // CHECK7-NEXT: [[CMP8:%.*]] = icmp ult i32 [[TMP16]], [[ADD7]] // CHECK7-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP45]] -// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul i32 [[TMP19]], 1 // CHECK7-NEXT: [[ADD9:%.*]] = add i32 [[TMP18]], [[MUL]] -// CHECK7-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP45]] -// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: store i32 
[[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1 -// CHECK7-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP45]] -// CHECK7-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK7-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV:%.*]] = sext i16 [[TMP21]] to i32 // CHECK7-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV]], 1 // CHECK7-NEXT: [[CONV12:%.*]] = trunc i32 [[ADD11]] to i16 -// CHECK7-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP45]] -// CHECK7-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP46]] +// CHECK7-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV13:%.*]] = sext i8 [[TMP22]] to i32 // CHECK7-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV13]], 1 // CHECK7-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 -// CHECK7-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP46]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK7-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP46]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]] +// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD17:%.*]] = add i32 [[TMP24]], 1 -// CHECK7-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP45]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP46]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP47:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK7-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK7-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP27:%.*]] = load 
i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK7-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: -// CHECK7-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK7-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK7-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK7-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[SUB18:%.*]] = sub i32 [[TMP30]], [[TMP31]] // CHECK7-NEXT: [[SUB19:%.*]] = sub i32 [[SUB18]], 1 // CHECK7-NEXT: [[ADD20:%.*]] = add i32 [[SUB19]], 1 @@ -7184,12 +7184,12 @@ // CHECK7-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK7-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..18, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK7-NEXT: ret void // @@ -7220,57 +7220,57 @@ // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP48:![0-9]+]] -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP48]] -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP49]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP48]] -// CHECK7-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP48]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP49]] +// CHECK7-NEXT: [[TMP10:%.*]] = 
load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK7-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK7-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP48]] +// CHECK7-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP49]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK7-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK7-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP49]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP48]] +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]], !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK7-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP48]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP49:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP49]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP50:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK7-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK7-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: @@ -7331,203 +7331,203 @@ // CHECK9-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK9-NEXT: store i32 0, ptr [[A]], align 4 // CHECK9-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64 // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]] // CHECK9-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8 // CHECK9-NEXT: store i64 [[TMP4]], ptr [[__VLA_EXPR1]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[A]], align 4, 
!noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR_2]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: store i32 10, ptr [[I]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB5]], align 4 // CHECK9-NEXT: store i32 9, ptr [[DOTOMP_UB6]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB5]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB5]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV7]], align 4 -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP14]], ptr [[DOTLINEAR_START]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] // CHECK9: omp.inner.for.cond10: -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4 -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr 
[[DOTOMP_UB6]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB6]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] // CHECK9-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]] // CHECK9: omp.inner.for.body12: -// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4 +// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 1 // CHECK9-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] // CHECK9-NEXT: store i32 [[ADD14]], ptr [[A8]], align 4 -// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[A8]], align 4 +// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[A8]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1 // CHECK9-NEXT: store i32 [[ADD15]], ptr [[A8]], align 4 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] // CHECK9: omp.body.continue16: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK9: omp.inner.for.inc17: -// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4 +// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK9-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV7]], align 4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK9: omp.inner.for.end19: // CHECK9-NEXT: store i32 10, ptr [[A]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB21]], align 4 // CHECK9-NEXT: store i32 9, ptr [[DOTOMP_UB22]], align 4 -// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB21]], align 4 +// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB21]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP20]], ptr [[DOTOMP_IV23]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND25:%.*]] // CHECK9: omp.inner.for.cond25: -// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK9-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB22]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB22]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP26:%.*]] = icmp sle i32 [[TMP21]], [[TMP22]] // CHECK9-NEXT: br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]] // CHECK9: omp.inner.for.body27: -// CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL28:%.*]] = mul nsw i32 [[TMP23]], 1 // CHECK9-NEXT: [[ADD29:%.*]] = add nsw i32 0, [[MUL28]] -// CHECK9-NEXT: store i32 [[ADD29]], ptr [[I24]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK9-NEXT: [[TMP24:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: store i32 [[ADD29]], ptr [[I24]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: [[TMP24:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group 
[[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP24]] to i32 // CHECK9-NEXT: [[ADD30:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16 -// CHECK9-NEXT: store i16 [[CONV31]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: store i16 [[CONV31]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP10]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE32:%.*]] // CHECK9: omp.body.continue32: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC33:%.*]] // CHECK9: omp.inner.for.inc33: -// CHECK9-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD34:%.*]] = add nsw i32 [[TMP25]], 1 -// CHECK9-NEXT: store i32 [[ADD34]], ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD34]], ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK9: omp.inner.for.end35: // CHECK9-NEXT: store i32 10, ptr [[I24]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB37]], align 4 // CHECK9-NEXT: store i32 9, ptr [[DOTOMP_UB38]], align 4 -// CHECK9-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB37]], align 4 +// CHECK9-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB37]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP26]], ptr [[DOTOMP_IV39]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND41:%.*]] // CHECK9: omp.inner.for.cond41: -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB38]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB38]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP42:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]] // CHECK9-NEXT: br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]] // CHECK9: omp.inner.for.body43: -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL44:%.*]] = mul nsw i32 [[TMP29]], 1 // CHECK9-NEXT: [[ADD45:%.*]] = add nsw i32 0, [[MUL44]] -// CHECK9-NEXT: store i32 [[ADD45]], ptr [[I40]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: store i32 [[ADD45]], ptr [[I40]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD46:%.*]] = add nsw i32 [[TMP30]], 1 -// CHECK9-NEXT: store i32 [[ADD46]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK9-NEXT: [[TMP31:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: store i32 [[ADD46]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: [[TMP31:%.*]] = load 
i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV47:%.*]] = sext i16 [[TMP31]] to i32 // CHECK9-NEXT: [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1 // CHECK9-NEXT: [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16 -// CHECK9-NEXT: store i16 [[CONV49]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: store i16 [[CONV49]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP13]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE50:%.*]] // CHECK9: omp.body.continue50: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC51:%.*]] // CHECK9: omp.inner.for.inc51: -// CHECK9-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD52:%.*]] = add nsw i32 [[TMP32]], 1 -// CHECK9-NEXT: store i32 [[ADD52]], ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD52]], ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK9: omp.inner.for.end53: // CHECK9-NEXT: store i32 10, ptr [[I40]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB55]], align 4 // CHECK9-NEXT: store i32 9, ptr [[DOTOMP_UB56]], align 4 -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_LB55]], align 4 +// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_LB55]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP33]], ptr [[DOTOMP_IV57]], align 4 // CHECK9-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], ptr [[B]], i64 0, i64 0 // CHECK9-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[ARRAYDECAY]], i64 16) ] // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND59:%.*]] // CHECK9: omp.inner.for.cond59: -// CHECK9-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]] -// CHECK9-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_UB56]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_UB56]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP60:%.*]] = icmp sle i32 [[TMP34]], [[TMP35]] // CHECK9-NEXT: br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]] // CHECK9: omp.inner.for.body61: -// CHECK9-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL62:%.*]] = mul nsw i32 [[TMP36]], 1 // CHECK9-NEXT: [[ADD63:%.*]] = add nsw i32 0, [[MUL62]] -// CHECK9-NEXT: store i32 [[ADD63]], ptr [[I58]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: [[TMP37:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store i32 [[ADD63]], ptr [[I58]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK9-NEXT: [[TMP37:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD64:%.*]] = add nsw i32 [[TMP37]], 1 -// CHECK9-NEXT: store i32 [[ADD64]], ptr [[A]], align 4, !llvm.access.group 
[[ACC_GRP15]] +// CHECK9-NEXT: store i32 [[ADD64]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[B]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP38:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP38:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV65:%.*]] = fpext float [[TMP38]] to double // CHECK9-NEXT: [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00 // CHECK9-NEXT: [[CONV67:%.*]] = fptrunc double [[ADD66]] to float -// CHECK9-NEXT: store float [[CONV67]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store float [[CONV67]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: [[ARRAYIDX68:%.*]] = getelementptr inbounds float, ptr [[VLA]], i64 3 -// CHECK9-NEXT: [[TMP39:%.*]] = load float, ptr [[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP39:%.*]] = load float, ptr [[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV69:%.*]] = fpext float [[TMP39]] to double // CHECK9-NEXT: [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00 // CHECK9-NEXT: [[CONV71:%.*]] = fptrunc double [[ADD70]] to float -// CHECK9-NEXT: store float [[CONV71]], ptr [[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store float [[CONV71]], ptr [[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[C]], i64 0, i64 1 // CHECK9-NEXT: [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX72]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP40:%.*]] = load double, ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP40:%.*]] = load double, ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD74:%.*]] = fadd double [[TMP40]], 1.000000e+00 -// CHECK9-NEXT: store double [[ADD74]], ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store double [[ADD74]], ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: [[TMP41:%.*]] = mul nsw i64 1, [[TMP4]] // CHECK9-NEXT: [[ARRAYIDX75:%.*]] = getelementptr inbounds double, ptr [[VLA1]], i64 [[TMP41]] // CHECK9-NEXT: [[ARRAYIDX76:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX75]], i64 3 -// CHECK9-NEXT: [[TMP42:%.*]] = load double, ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP42:%.*]] = load double, ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD77:%.*]] = fadd double [[TMP42]], 1.000000e+00 -// CHECK9-NEXT: store double [[ADD77]], ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store double [[ADD77]], ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP43:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP43:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD78:%.*]] = add nsw i64 [[TMP43]], 1 -// CHECK9-NEXT: store i64 [[ADD78]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store i64 [[ADD78]], ptr [[X]], 
align 8, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP44:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP44:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV79:%.*]] = sext i8 [[TMP44]] to i32 // CHECK9-NEXT: [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1 // CHECK9-NEXT: [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8 -// CHECK9-NEXT: store i8 [[CONV81]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: store i8 [[CONV81]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP16]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE82:%.*]] // CHECK9: omp.body.continue82: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC83:%.*]] // CHECK9: omp.inner.for.inc83: -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD84:%.*]] = add nsw i32 [[TMP45]], 1 -// CHECK9-NEXT: store i32 [[ADD84]], ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD84]], ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK9: omp.inner.for.end85: // CHECK9-NEXT: store i32 10, ptr [[I58]], align 4 -// CHECK9-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) // CHECK9-NEXT: ret i32 [[TMP46]] @@ -7541,27 +7541,27 @@ // CHECK9-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK9-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK9-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK9-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK9-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load 
i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK9-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK9-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: ret i32 [[TMP8]] // // @@ -7581,10 +7581,10 @@ // CHECK9-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK9-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK9-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK9-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK9-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK9-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 @@ -7593,49 +7593,49 @@ // CHECK9-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD2]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: store i32 [[ADD2]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group 
[[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP9]] to double // CHECK9-NEXT: [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: store double [[ADD3]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: store double [[ADD3]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP19]] // CHECK9-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP10:%.*]] = load double, ptr [[A4]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP10:%.*]] = load double, ptr [[A4]], align 8, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00 -// CHECK9-NEXT: store double [[INC]], ptr [[A4]], align 8, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: store double [[INC]], ptr [[A4]], align 8, !llvm.access.group [[ACC_GRP19]] // CHECK9-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK9-NEXT: [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP11]] // CHECK9-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK9-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP19]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK9-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: store i32 10, ptr [[I]], align 4 // CHECK9-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP13]] // CHECK9-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX8]], i64 1 -// CHECK9-NEXT: [[TMP14:%.*]] = load i16, ptr [[ARRAYIDX9]], align 2 +// CHECK9-NEXT: [[TMP14:%.*]] = load i16, ptr [[ARRAYIDX9]], align 2, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV10:%.*]] = sext i16 [[TMP14]] to i32 -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[B]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV10]], [[TMP15]] // CHECK9-NEXT: [[TMP16:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP16]]) @@ -7663,12 +7663,12 @@ // CHECK9-NEXT: store i32 0, ptr [[A]], align 4 // CHECK9-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK9-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP0]], ptr 
[[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]] // CHECK9-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 @@ -7676,59 +7676,59 @@ // CHECK9-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK9: simd.if.then: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]] -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD6:%.*]] = add i32 [[TMP10]], 1 // CHECK9-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]] // CHECK9-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL:%.*]] = mul i32 [[TMP12]], 1 // CHECK9-NEXT: [[ADD8:%.*]] = add i32 
[[TMP11]], [[MUL]] -// CHECK9-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK9-NEXT: store i32 [[ADD9]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK9-NEXT: [[TMP14:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: store i32 [[ADD9]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK9-NEXT: [[TMP14:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP14]] to i32 // CHECK9-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16 -// CHECK9-NEXT: store i16 [[CONV11]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP21]] -// CHECK9-NEXT: [[TMP15:%.*]] = load i8, ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: store i16 [[CONV11]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP22]] +// CHECK9-NEXT: [[TMP15:%.*]] = load i8, ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV12:%.*]] = sext i8 [[TMP15]] to i32 // CHECK9-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1 // CHECK9-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8 -// CHECK9-NEXT: store i8 [[CONV14]], ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: store i8 [[CONV14]], ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP22]] // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[B]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK9-NEXT: store i32 [[ADD15]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: store i32 [[ADD15]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP22]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD16:%.*]] = add i32 [[TMP17]], 1 -// CHECK9-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK9: omp.inner.for.end: -// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] +// 
CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]] // CHECK9-NEXT: [[SUB18:%.*]] = sub i32 [[SUB17]], 1 // CHECK9-NEXT: [[ADD19:%.*]] = add i32 [[SUB18]], 1 @@ -7738,7 +7738,7 @@ // CHECK9-NEXT: store i32 [[ADD22]], ptr [[I5]], align 4 // CHECK9-NEXT: br label [[SIMD_IF_END]] // CHECK9: simd.if.end: -// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: ret i32 [[TMP21]] // // @@ -7759,42 +7759,42 @@ // CHECK9-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]] -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK9-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK9-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP24]] +// CHECK9-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK9-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK9-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP24]] +// CHECK9-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP25]] // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[B]], i64 0, i64 2 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, 
!llvm.access.group [[ACC_GRP24]] +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK9-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK9-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1 -// CHECK9-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: store i32 10, ptr [[I]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK9-NEXT: ret i32 [[TMP8]] // // @@ -7842,201 +7842,201 @@ // CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK11-NEXT: store i32 0, ptr [[A]], align 4 // CHECK11-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]] // CHECK11-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8 // CHECK11-NEXT: store i32 [[TMP2]], ptr [[__VLA_EXPR1]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_2]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]] -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: store i32 10, ptr [[I]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB5]], align 4 // CHECK11-NEXT: store i32 9, ptr [[DOTOMP_UB6]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB5]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB5]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV7]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP12]], ptr [[DOTLINEAR_START]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] // CHECK11: omp.inner.for.cond10: -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB6]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB6]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK11-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]] // CHECK11: omp.inner.for.body12: -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK11-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] // CHECK11-NEXT: store i32 [[ADD14]], ptr [[A8]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[A8]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[A8]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD15:%.*]] = add nsw i32 
[[TMP16]], 1 // CHECK11-NEXT: store i32 [[ADD15]], ptr [[A8]], align 4 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] // CHECK11: omp.body.continue16: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK11: omp.inner.for.inc17: -// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4 +// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1 // CHECK11-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV7]], align 4 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK11: omp.inner.for.end19: // CHECK11-NEXT: store i32 10, ptr [[A]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB21]], align 4 // CHECK11-NEXT: store i32 9, ptr [[DOTOMP_UB22]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB21]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB21]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP18]], ptr [[DOTOMP_IV23]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND25:%.*]] // CHECK11: omp.inner.for.cond25: -// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]] -// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB22]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB22]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP26:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]] // CHECK11-NEXT: br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]] // CHECK11: omp.inner.for.body27: -// CHECK11-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL28:%.*]] = mul nsw i32 [[TMP21]], 1 // CHECK11-NEXT: [[ADD29:%.*]] = add nsw i32 0, [[MUL28]] -// CHECK11-NEXT: store i32 [[ADD29]], ptr [[I24]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK11-NEXT: [[TMP22:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: store i32 [[ADD29]], ptr [[I24]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: [[TMP22:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP22]] to i32 // CHECK11-NEXT: [[ADD30:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16 -// CHECK11-NEXT: store i16 [[CONV31]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: store i16 [[CONV31]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP11]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE32:%.*]] // CHECK11: omp.body.continue32: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC33:%.*]] // CHECK11: omp.inner.for.inc33: -// CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD34:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK11-NEXT: store i32 [[ADD34]], 
ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD34]], ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK11: omp.inner.for.end35: // CHECK11-NEXT: store i32 10, ptr [[I24]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB37]], align 4 // CHECK11-NEXT: store i32 9, ptr [[DOTOMP_UB38]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_LB37]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_LB37]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP24]], ptr [[DOTOMP_IV39]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND41:%.*]] // CHECK11: omp.inner.for.cond41: -// CHECK11-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]] -// CHECK11-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_UB38]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_UB38]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP42:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]] // CHECK11-NEXT: br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]] // CHECK11: omp.inner.for.body43: -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL44:%.*]] = mul nsw i32 [[TMP27]], 1 // CHECK11-NEXT: [[ADD45:%.*]] = add nsw i32 0, [[MUL44]] -// CHECK11-NEXT: store i32 [[ADD45]], ptr [[I40]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: store i32 [[ADD45]], ptr [[I40]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD46:%.*]] = add nsw i32 [[TMP28]], 1 -// CHECK11-NEXT: store i32 [[ADD46]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK11-NEXT: [[TMP29:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: store i32 [[ADD46]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: [[TMP29:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV47:%.*]] = sext i16 [[TMP29]] to i32 // CHECK11-NEXT: [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1 // CHECK11-NEXT: [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16 -// CHECK11-NEXT: store i16 [[CONV49]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: store i16 [[CONV49]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP14]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE50:%.*]] // CHECK11: omp.body.continue50: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC51:%.*]] // CHECK11: omp.inner.for.inc51: -// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF3]] // 
CHECK11-NEXT: [[ADD52:%.*]] = add nsw i32 [[TMP30]], 1 -// CHECK11-NEXT: store i32 [[ADD52]], ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP14:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD52]], ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK11: omp.inner.for.end53: // CHECK11-NEXT: store i32 10, ptr [[I40]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB55]], align 4 // CHECK11-NEXT: store i32 9, ptr [[DOTOMP_UB56]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_LB55]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_LB55]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP31]], ptr [[DOTOMP_IV57]], align 4 // CHECK11-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], ptr [[B]], i32 0, i32 0 // CHECK11-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[ARRAYDECAY]], i32 16) ] // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND59:%.*]] // CHECK11: omp.inner.for.cond59: -// CHECK11-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]] -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_UB56]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_UB56]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP60:%.*]] = icmp sle i32 [[TMP32]], [[TMP33]] // CHECK11-NEXT: br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]] // CHECK11: omp.inner.for.body61: -// CHECK11-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL62:%.*]] = mul nsw i32 [[TMP34]], 1 // CHECK11-NEXT: [[ADD63:%.*]] = add nsw i32 0, [[MUL62]] -// CHECK11-NEXT: store i32 [[ADD63]], ptr [[I58]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK11-NEXT: [[TMP35:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store i32 [[ADD63]], ptr [[I58]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK11-NEXT: [[TMP35:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD64:%.*]] = add nsw i32 [[TMP35]], 1 -// CHECK11-NEXT: store i32 [[ADD64]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store i32 [[ADD64]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[B]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP36:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP36:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV65:%.*]] = fpext float [[TMP36]] to double // CHECK11-NEXT: [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00 // CHECK11-NEXT: [[CONV67:%.*]] = fptrunc double [[ADD66]] to float -// CHECK11-NEXT: store float [[CONV67]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store float [[CONV67]], ptr [[ARRAYIDX]], align 4, 
!llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: [[ARRAYIDX68:%.*]] = getelementptr inbounds float, ptr [[VLA]], i32 3 -// CHECK11-NEXT: [[TMP37:%.*]] = load float, ptr [[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP37:%.*]] = load float, ptr [[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV69:%.*]] = fpext float [[TMP37]] to double // CHECK11-NEXT: [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00 // CHECK11-NEXT: [[CONV71:%.*]] = fptrunc double [[ADD70]] to float -// CHECK11-NEXT: store float [[CONV71]], ptr [[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store float [[CONV71]], ptr [[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[C]], i32 0, i32 1 // CHECK11-NEXT: [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX72]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP38:%.*]] = load double, ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP38:%.*]] = load double, ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD74:%.*]] = fadd double [[TMP38]], 1.000000e+00 -// CHECK11-NEXT: store double [[ADD74]], ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store double [[ADD74]], ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK11-NEXT: [[ARRAYIDX75:%.*]] = getelementptr inbounds double, ptr [[VLA1]], i32 [[TMP39]] // CHECK11-NEXT: [[ARRAYIDX76:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX75]], i32 3 -// CHECK11-NEXT: [[TMP40:%.*]] = load double, ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP40:%.*]] = load double, ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD77:%.*]] = fadd double [[TMP40]], 1.000000e+00 -// CHECK11-NEXT: store double [[ADD77]], ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store double [[ADD77]], ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP41:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP41:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD78:%.*]] = add nsw i64 [[TMP41]], 1 -// CHECK11-NEXT: store i64 [[ADD78]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store i64 [[ADD78]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP42:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP42:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV79:%.*]] = sext i8 [[TMP42]] to i32 // CHECK11-NEXT: [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1 // CHECK11-NEXT: [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8 -// CHECK11-NEXT: store i8 [[CONV81]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: store i8 [[CONV81]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE82:%.*]] // CHECK11: 
omp.body.continue82: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC83:%.*]] // CHECK11: omp.inner.for.inc83: -// CHECK11-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD84:%.*]] = add nsw i32 [[TMP43]], 1 -// CHECK11-NEXT: store i32 [[ADD84]], ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP17:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD84]], ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP18:![0-9]+]] // CHECK11: omp.inner.for.end85: // CHECK11-NEXT: store i32 10, ptr [[I58]], align 4 -// CHECK11-NEXT: [[TMP44:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP44:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[TMP45:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP45]]) // CHECK11-NEXT: ret i32 [[TMP44]] @@ -8050,27 +8050,27 @@ // CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK11-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK11-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK11-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK11-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK11-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = 
load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: ret i32 [[TMP8]] // // @@ -8090,10 +8090,10 @@ // CHECK11-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK11-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK11-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] @@ -8101,49 +8101,49 @@ // CHECK11-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD2]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: store i32 [[ADD2]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP8]] to double // CHECK11-NEXT: [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: store double [[ADD3]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: store double [[ADD3]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK11-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP9:%.*]] = load double, ptr [[A4]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP9:%.*]] = load double, ptr [[A4]], align 4, !llvm.access.group [[ACC_GRP20]], 
!noundef [[NOUNDEF3]] // CHECK11-NEXT: [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00 -// CHECK11-NEXT: store double [[INC]], ptr [[A4]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: store double [[INC]], ptr [[A4]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK11-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK11-NEXT: [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP10]] // CHECK11-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK11-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: store i16 [[CONV5]], ptr [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP20]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK11-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: store i32 10, ptr [[I]], align 4 // CHECK11-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP12]] // CHECK11-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX8]], i32 1 -// CHECK11-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX9]], align 2 +// CHECK11-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX9]], align 2, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV10:%.*]] = sext i16 [[TMP13]] to i32 -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[B]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV10]], [[TMP14]] // CHECK11-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) @@ -8171,12 +8171,12 @@ // CHECK11-NEXT: store i32 0, ptr [[A]], align 4 // CHECK11-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK11-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]] // CHECK11-NEXT: [[SUB3:%.*]] = sub 
i32 [[SUB]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 @@ -8184,59 +8184,59 @@ // CHECK11-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // CHECK11-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK11: simd.if.then: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]] -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD6:%.*]] = add i32 [[TMP10]], 1 // CHECK11-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]] // CHECK11-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL:%.*]] = mul i32 [[TMP12]], 1 // CHECK11-NEXT: [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]] -// CHECK11-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK11-NEXT: store i32 [[ADD9]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK11-NEXT: [[TMP14:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP22]] 
+// CHECK11-NEXT: store i32 [[ADD9]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK11-NEXT: [[TMP14:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP14]] to i32 // CHECK11-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16 -// CHECK11-NEXT: store i16 [[CONV11]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP22]] -// CHECK11-NEXT: [[TMP15:%.*]] = load i8, ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: store i16 [[CONV11]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP23]] +// CHECK11-NEXT: [[TMP15:%.*]] = load i8, ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV12:%.*]] = sext i8 [[TMP15]] to i32 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1 // CHECK11-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8 -// CHECK11-NEXT: store i8 [[CONV14]], ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: store i8 [[CONV14]], ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP23]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[B]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK11-NEXT: store i32 [[ADD15]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: store i32 [[ADD15]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD16:%.*]] = add i32 [[TMP17]], 1 -// CHECK11-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] // CHECK11: omp.inner.for.end: -// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]] // CHECK11-NEXT: [[SUB18:%.*]] = sub i32 [[SUB17]], 1 // CHECK11-NEXT: [[ADD19:%.*]] = add i32 [[SUB18]], 1 @@ -8246,7 +8246,7 @@ // CHECK11-NEXT: store i32 [[ADD22]], ptr [[I5]], align 4 // CHECK11-NEXT: br label [[SIMD_IF_END]] // CHECK11: simd.if.end: -// CHECK11-NEXT: [[TMP21:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = load i32, 
ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: ret i32 [[TMP21]] // // @@ -8267,42 +8267,42 @@ // CHECK11-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]] -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK11-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK11-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP25]] +// CHECK11-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK11-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK11-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP25]] +// CHECK11-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP26]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[B]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK11-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK11-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// 
CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK11-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1 -// CHECK11-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: store i32 10, ptr [[I]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK11-NEXT: ret i32 [[TMP8]] // // @@ -8350,203 +8350,203 @@ // CHECK13-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK13-NEXT: store i32 0, ptr [[A]], align 4 // CHECK13-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64 // CHECK13-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]] // CHECK13-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8 // CHECK13-NEXT: store i64 [[TMP4]], ptr [[__VLA_EXPR1]], align 8 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR_2]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP11:%.*]] = load 
i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK13-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 10, ptr [[I]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB5]], align 4 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB6]], align 4 -// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB5]], align 4 +// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB5]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV7]], align 4 -// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP14]], ptr [[DOTLINEAR_START]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] // CHECK13: omp.inner.for.cond10: -// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4 -// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB6]], align 4 +// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB6]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] // CHECK13-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]] // CHECK13: omp.inner.for.body12: -// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4 +// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 1 // CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] -// CHECK13-NEXT: store i32 [[ADD14]], ptr [[A8]], align 4, !nontemporal !7 -// CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[A8]], align 4, !nontemporal !7 +// CHECK13-NEXT: store i32 [[ADD14]], ptr [[A8]], align 4, !nontemporal !8 +// CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[A8]], align 4, !nontemporal !8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK13-NEXT: store i32 [[ADD15]], ptr [[A8]], align 4, !nontemporal !7 +// CHECK13-NEXT: store i32 [[ADD15]], ptr [[A8]], align 4, !nontemporal !8 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] // CHECK13: omp.body.continue16: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK13: 
omp.inner.for.inc17: -// CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4 +// CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK13-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV7]], align 4 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK13: omp.inner.for.end19: // CHECK13-NEXT: store i32 10, ptr [[A]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB21]], align 4 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB22]], align 4 -// CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB21]], align 4 +// CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB21]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP20]], ptr [[DOTOMP_IV23]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND25:%.*]] // CHECK13: omp.inner.for.cond25: -// CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]] -// CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB22]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB22]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP26:%.*]] = icmp sle i32 [[TMP21]], [[TMP22]] // CHECK13-NEXT: br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]] // CHECK13: omp.inner.for.body27: -// CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL28:%.*]] = mul nsw i32 [[TMP23]], 1 // CHECK13-NEXT: [[ADD29:%.*]] = add nsw i32 0, [[MUL28]] -// CHECK13-NEXT: store i32 [[ADD29]], ptr [[I24]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK13-NEXT: [[TMP24:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: store i32 [[ADD29]], ptr [[I24]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK13-NEXT: [[TMP24:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV:%.*]] = sext i16 [[TMP24]] to i32 // CHECK13-NEXT: [[ADD30:%.*]] = add nsw i32 [[CONV]], 1 // CHECK13-NEXT: [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16 -// CHECK13-NEXT: store i16 [[CONV31]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: store i16 [[CONV31]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP11]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE32:%.*]] // CHECK13: omp.body.continue32: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC33:%.*]] // CHECK13: omp.inner.for.inc33: -// CHECK13-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK13-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD34:%.*]] = add nsw i32 [[TMP25]], 1 -// CHECK13-NEXT: store i32 [[ADD34]], ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP10]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD34]], ptr [[DOTOMP_IV23]], align 4, 
!llvm.access.group [[ACC_GRP11]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK13: omp.inner.for.end35: // CHECK13-NEXT: store i32 10, ptr [[I24]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB37]], align 4 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB38]], align 4 -// CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB37]], align 4 +// CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_LB37]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP26]], ptr [[DOTOMP_IV39]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND41:%.*]] // CHECK13: omp.inner.for.cond41: -// CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]] -// CHECK13-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB38]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_UB38]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP42:%.*]] = icmp sle i32 [[TMP27]], [[TMP28]] // CHECK13-NEXT: br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]] // CHECK13: omp.inner.for.body43: -// CHECK13-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK13-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL44:%.*]] = mul nsw i32 [[TMP29]], 1 // CHECK13-NEXT: [[ADD45:%.*]] = add nsw i32 0, [[MUL44]] -// CHECK13-NEXT: store i32 [[ADD45]], ptr [[I40]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK13-NEXT: [[TMP30:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK13-NEXT: store i32 [[ADD45]], ptr [[I40]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK13-NEXT: [[TMP30:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD46:%.*]] = add nsw i32 [[TMP30]], 1 -// CHECK13-NEXT: store i32 [[ADD46]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK13-NEXT: [[TMP31:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP13]] +// CHECK13-NEXT: store i32 [[ADD46]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK13-NEXT: [[TMP31:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV47:%.*]] = sext i16 [[TMP31]] to i32 // CHECK13-NEXT: [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1 // CHECK13-NEXT: [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16 -// CHECK13-NEXT: store i16 [[CONV49]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP13]] +// CHECK13-NEXT: store i16 [[CONV49]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP14]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE50:%.*]] // CHECK13: omp.body.continue50: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC51:%.*]] // CHECK13: omp.inner.for.inc51: -// CHECK13-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK13-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD52:%.*]] = add nsw i32 [[TMP32]], 1 -// CHECK13-NEXT: store i32 [[ADD52]], ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND41]], 
!llvm.loop [[LOOP14:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD52]], ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK13: omp.inner.for.end53: // CHECK13-NEXT: store i32 10, ptr [[I40]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB55]], align 4 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB56]], align 4 -// CHECK13-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_LB55]], align 4 +// CHECK13-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_LB55]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP33]], ptr [[DOTOMP_IV57]], align 4 // CHECK13-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], ptr [[B]], i64 0, i64 0 // CHECK13-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[ARRAYDECAY]], i64 16) ] // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND59:%.*]] // CHECK13: omp.inner.for.cond59: -// CHECK13-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]] -// CHECK13-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_UB56]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_UB56]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP60:%.*]] = icmp sle i32 [[TMP34]], [[TMP35]] // CHECK13-NEXT: br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]] // CHECK13: omp.inner.for.body61: -// CHECK13-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL62:%.*]] = mul nsw i32 [[TMP36]], 1 // CHECK13-NEXT: [[ADD63:%.*]] = add nsw i32 0, [[MUL62]] -// CHECK13-NEXT: store i32 [[ADD63]], ptr [[I58]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK13-NEXT: [[TMP37:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: store i32 [[ADD63]], ptr [[I58]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK13-NEXT: [[TMP37:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD64:%.*]] = add nsw i32 [[TMP37]], 1 -// CHECK13-NEXT: store i32 [[ADD64]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: store i32 [[ADD64]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[B]], i64 0, i64 2 -// CHECK13-NEXT: [[TMP38:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: [[TMP38:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV65:%.*]] = fpext float [[TMP38]] to double // CHECK13-NEXT: [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00 // CHECK13-NEXT: [[CONV67:%.*]] = fptrunc double [[ADD66]] to float -// CHECK13-NEXT: store float [[CONV67]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: store float [[CONV67]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK13-NEXT: [[ARRAYIDX68:%.*]] = getelementptr inbounds float, ptr [[VLA]], i64 3 -// CHECK13-NEXT: [[TMP39:%.*]] = load float, ptr [[ARRAYIDX68]], align 4, !llvm.access.group 
[[ACC_GRP16]] +// CHECK13-NEXT: [[TMP39:%.*]] = load float, ptr [[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV69:%.*]] = fpext float [[TMP39]] to double // CHECK13-NEXT: [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00 // CHECK13-NEXT: [[CONV71:%.*]] = fptrunc double [[ADD70]] to float -// CHECK13-NEXT: store float [[CONV71]], ptr [[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: store float [[CONV71]], ptr [[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK13-NEXT: [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[C]], i64 0, i64 1 // CHECK13-NEXT: [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX72]], i64 0, i64 2 -// CHECK13-NEXT: [[TMP40:%.*]] = load double, ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: [[TMP40:%.*]] = load double, ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD74:%.*]] = fadd double [[TMP40]], 1.000000e+00 -// CHECK13-NEXT: store double [[ADD74]], ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: store double [[ADD74]], ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK13-NEXT: [[TMP41:%.*]] = mul nsw i64 1, [[TMP4]] // CHECK13-NEXT: [[ARRAYIDX75:%.*]] = getelementptr inbounds double, ptr [[VLA1]], i64 [[TMP41]] // CHECK13-NEXT: [[ARRAYIDX76:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX75]], i64 3 -// CHECK13-NEXT: [[TMP42:%.*]] = load double, ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: [[TMP42:%.*]] = load double, ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD77:%.*]] = fadd double [[TMP42]], 1.000000e+00 -// CHECK13-NEXT: store double [[ADD77]], ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: store double [[ADD77]], ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK13-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP43:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: [[TMP43:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD78:%.*]] = add nsw i64 [[TMP43]], 1 -// CHECK13-NEXT: store i64 [[ADD78]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: store i64 [[ADD78]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK13-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP44:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: [[TMP44:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV79:%.*]] = sext i8 [[TMP44]] to i32 // CHECK13-NEXT: [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1 // CHECK13-NEXT: [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8 -// CHECK13-NEXT: store i8 [[CONV81]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP16]] +// CHECK13-NEXT: store i8 [[CONV81]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE82:%.*]] // CHECK13: omp.body.continue82: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC83:%.*]] // CHECK13: omp.inner.for.inc83: -// CHECK13-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group 
[[ACC_GRP16]] +// CHECK13-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP17]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD84:%.*]] = add nsw i32 [[TMP45]], 1 -// CHECK13-NEXT: store i32 [[ADD84]], ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP16]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP17:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD84]], ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP18:![0-9]+]] // CHECK13: omp.inner.for.end85: // CHECK13-NEXT: store i32 10, ptr [[I58]], align 4 -// CHECK13-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) // CHECK13-NEXT: ret i32 [[TMP46]] @@ -8560,27 +8560,27 @@ // CHECK13-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK13-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK13-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK13-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(ptr noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK13-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK13-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK13-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: ret i32 [[TMP8]] // // @@ -8601,82 +8601,82 @@ // CHECK13-NEXT: 
store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK13-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK13-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK13-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK13-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK13-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK13-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 // CHECK13-NEXT: store i64 [[TMP2]], ptr [[__VLA_EXPR0]], align 8 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60 // CHECK13-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 // CHECK13-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 -// CHECK13-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK13-NEXT: [[TMP7:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 // CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK13: omp_if.then: // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK13-NEXT: [[ADD3:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD3]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK13-NEXT: store i32 [[ADD3]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP11]] 
to double // CHECK13-NEXT: [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK13-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK13-NEXT: store double [[ADD4]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK13-NEXT: store double [[ADD4]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP20]] // CHECK13-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP12:%.*]] = load double, ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK13-NEXT: [[TMP12:%.*]] = load double, ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00 -// CHECK13-NEXT: store double [[INC]], ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP19]] +// CHECK13-NEXT: store double [[INC]], ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP20]] // CHECK13-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 // CHECK13-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP13]] // CHECK13-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK13-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP19]] +// CHECK13-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP20]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP14]], 1 -// CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: br label [[OMP_IF_END:%.*]] // CHECK13: omp_if.else: // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK13: omp.inner.for.cond9: -// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK13-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] // CHECK13-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END25:%.*]] // CHECK13: omp.inner.for.body11: -// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP17]], 1 // CHECK13-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]] // CHECK13-NEXT: store i32 [[ADD13]], ptr [[I]], align 4 -// CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[B]], align 4 +// CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV14:%.*]] = sitofp i32 [[TMP18]] to double // CHECK13-NEXT: 
[[ADD15:%.*]] = fadd double [[CONV14]], 1.500000e+00 // CHECK13-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 // CHECK13-NEXT: store double [[ADD15]], ptr [[A16]], align 8 // CHECK13-NEXT: [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP19:%.*]] = load double, ptr [[A17]], align 8 +// CHECK13-NEXT: [[TMP19:%.*]] = load double, ptr [[A17]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[INC18:%.*]] = fadd double [[TMP19]], 1.000000e+00 // CHECK13-NEXT: store double [[INC18]], ptr [[A17]], align 8 // CHECK13-NEXT: [[CONV19:%.*]] = fptosi double [[INC18]] to i16 @@ -8688,10 +8688,10 @@ // CHECK13: omp.body.continue22: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC23:%.*]] // CHECK13: omp.inner.for.inc23: -// CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP21]], 1 // CHECK13-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK13: omp.inner.for.end25: // CHECK13-NEXT: br label [[OMP_IF_END]] // CHECK13: omp_if.end: @@ -8699,9 +8699,9 @@ // CHECK13-NEXT: [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK13-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[TMP22]] // CHECK13-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX26]], i64 1 -// CHECK13-NEXT: [[TMP23:%.*]] = load i16, ptr [[ARRAYIDX27]], align 2 +// CHECK13-NEXT: [[TMP23:%.*]] = load i16, ptr [[ARRAYIDX27]], align 2, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV28:%.*]] = sext i16 [[TMP23]] to i32 -// CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[B]], align 4 +// CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD29:%.*]] = add nsw i32 [[CONV28]], [[TMP24]] // CHECK13-NEXT: [[TMP25:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP25]]) @@ -8729,12 +8729,12 @@ // CHECK13-NEXT: store i32 0, ptr [[A]], align 4 // CHECK13-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK13-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]] // CHECK13-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 @@ -8742,59 +8742,59 @@ // CHECK13-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK13-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// 
CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // CHECK13-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK13: simd.if.then: -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]] -// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD6:%.*]] = add i32 [[TMP10]], 1 // CHECK13-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]] // CHECK13-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul i32 [[TMP12]], 1 // CHECK13-NEXT: [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]] -// CHECK13-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK13-NEXT: store i32 [[ADD9]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK13-NEXT: [[TMP14:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: store i32 [[ADD9]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK13-NEXT: [[TMP14:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV:%.*]] = sext i16 [[TMP14]] to i32 // 
CHECK13-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV]], 1 // CHECK13-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16 -// CHECK13-NEXT: store i16 [[CONV11]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP24]] -// CHECK13-NEXT: [[TMP15:%.*]] = load i8, ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: store i16 [[CONV11]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP25]] +// CHECK13-NEXT: [[TMP15:%.*]] = load i8, ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV12:%.*]] = sext i8 [[TMP15]] to i32 // CHECK13-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1 // CHECK13-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8 -// CHECK13-NEXT: store i8 [[CONV14]], ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: store i8 [[CONV14]], ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP25]] // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[B]], i64 0, i64 2 -// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK13-NEXT: store i32 [[ADD15]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: store i32 [[ADD15]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD16:%.*]] = add i32 [[TMP17]], 1 -// CHECK13-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK13: omp.inner.for.end: -// CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]] // CHECK13-NEXT: [[SUB18:%.*]] = sub i32 [[SUB17]], 1 // CHECK13-NEXT: [[ADD19:%.*]] = add i32 [[SUB18]], 1 @@ -8804,7 +8804,7 @@ // CHECK13-NEXT: store i32 [[ADD22]], ptr [[I5]], align 4 // CHECK13-NEXT: br label [[SIMD_IF_END]] // CHECK13: simd.if.end: -// CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: ret i32 [[TMP21]] // // @@ -8825,42 +8825,42 @@ // CHECK13-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// 
CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]] -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK13-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK13-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP27]] +// CHECK13-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK13-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 // CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK13-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK13-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP27]] +// CHECK13-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP28]] // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[B]], i64 0, i64 2 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK13-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK13-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP28]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP7]], 
1 -// CHECK13-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 10, ptr [[I]], align 4 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: ret i32 [[TMP8]] // // @@ -8908,201 +8908,201 @@ // CHECK15-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK15-NEXT: store i32 0, ptr [[A]], align 4 // CHECK15-NEXT: store i16 0, ptr [[AA]], align 2 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() // CHECK15-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]] // CHECK15-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8 // CHECK15-NEXT: store i32 [[TMP2]], ptr [[__VLA_EXPR1]], align 4 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_2]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]] -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// 
CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK15-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 10, ptr [[I]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB5]], align 4 // CHECK15-NEXT: store i32 9, ptr [[DOTOMP_UB6]], align 4 -// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB5]], align 4 +// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB5]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV7]], align 4 -// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP12]], ptr [[DOTLINEAR_START]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] // CHECK15: omp.inner.for.cond10: -// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4 -// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB6]], align 4 +// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB6]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK15-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END19:%.*]] // CHECK15: omp.inner.for.body12: -// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4 +// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK15-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] -// CHECK15-NEXT: store i32 [[ADD14]], ptr [[A8]], align 4, !nontemporal !8 -// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[A8]], align 4, !nontemporal !8 +// CHECK15-NEXT: store i32 [[ADD14]], ptr [[A8]], align 4, !nontemporal !9 +// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[A8]], align 4, !nontemporal !9, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK15-NEXT: store i32 [[ADD15]], ptr [[A8]], align 4, !nontemporal !8 +// CHECK15-NEXT: store i32 [[ADD15]], ptr [[A8]], align 4, !nontemporal !9 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] // CHECK15: omp.body.continue16: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK15: omp.inner.for.inc17: -// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4 +// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV7]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1 // CHECK15-NEXT: store i32 [[ADD18]], ptr [[DOTOMP_IV7]], align 4 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP9:![0-9]+]] +// 
CHECK15-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP10:![0-9]+]] // CHECK15: omp.inner.for.end19: // CHECK15-NEXT: store i32 10, ptr [[A]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB21]], align 4 // CHECK15-NEXT: store i32 9, ptr [[DOTOMP_UB22]], align 4 -// CHECK15-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB21]], align 4 +// CHECK15-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_LB21]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP18]], ptr [[DOTOMP_IV23]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND25:%.*]] // CHECK15: omp.inner.for.cond25: -// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]] -// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB22]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_UB22]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP26:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]] // CHECK15-NEXT: br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END35:%.*]] // CHECK15: omp.inner.for.body27: -// CHECK15-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL28:%.*]] = mul nsw i32 [[TMP21]], 1 // CHECK15-NEXT: [[ADD29:%.*]] = add nsw i32 0, [[MUL28]] -// CHECK15-NEXT: store i32 [[ADD29]], ptr [[I24]], align 4, !llvm.access.group [[ACC_GRP11]] -// CHECK15-NEXT: [[TMP22:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: store i32 [[ADD29]], ptr [[I24]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK15-NEXT: [[TMP22:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV:%.*]] = sext i16 [[TMP22]] to i32 // CHECK15-NEXT: [[ADD30:%.*]] = add nsw i32 [[CONV]], 1 // CHECK15-NEXT: [[CONV31:%.*]] = trunc i32 [[ADD30]] to i16 -// CHECK15-NEXT: store i16 [[CONV31]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: store i16 [[CONV31]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP12]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE32:%.*]] // CHECK15: omp.body.continue32: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC33:%.*]] // CHECK15: omp.inner.for.inc33: -// CHECK15-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK15-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD34:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK15-NEXT: store i32 [[ADD34]], ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP11]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD34]], ptr [[DOTOMP_IV23]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK15: omp.inner.for.end35: // CHECK15-NEXT: store i32 10, ptr [[I24]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB37]], align 4 // CHECK15-NEXT: store i32 9, ptr [[DOTOMP_UB38]], align 4 -// CHECK15-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_LB37]], align 4 +// 
CHECK15-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_LB37]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP24]], ptr [[DOTOMP_IV39]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND41:%.*]] // CHECK15: omp.inner.for.cond41: -// CHECK15-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]] -// CHECK15-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_UB38]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK15-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_UB38]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP42:%.*]] = icmp sle i32 [[TMP25]], [[TMP26]] // CHECK15-NEXT: br i1 [[CMP42]], label [[OMP_INNER_FOR_BODY43:%.*]], label [[OMP_INNER_FOR_END53:%.*]] // CHECK15: omp.inner.for.body43: -// CHECK15-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK15-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL44:%.*]] = mul nsw i32 [[TMP27]], 1 // CHECK15-NEXT: [[ADD45:%.*]] = add nsw i32 0, [[MUL44]] -// CHECK15-NEXT: store i32 [[ADD45]], ptr [[I40]], align 4, !llvm.access.group [[ACC_GRP14]] -// CHECK15-NEXT: [[TMP28:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK15-NEXT: store i32 [[ADD45]], ptr [[I40]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK15-NEXT: [[TMP28:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD46:%.*]] = add nsw i32 [[TMP28]], 1 -// CHECK15-NEXT: store i32 [[ADD46]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP14]] -// CHECK15-NEXT: [[TMP29:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP14]] +// CHECK15-NEXT: store i32 [[ADD46]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK15-NEXT: [[TMP29:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV47:%.*]] = sext i16 [[TMP29]] to i32 // CHECK15-NEXT: [[ADD48:%.*]] = add nsw i32 [[CONV47]], 1 // CHECK15-NEXT: [[CONV49:%.*]] = trunc i32 [[ADD48]] to i16 -// CHECK15-NEXT: store i16 [[CONV49]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP14]] +// CHECK15-NEXT: store i16 [[CONV49]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP15]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE50:%.*]] // CHECK15: omp.body.continue50: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC51:%.*]] // CHECK15: omp.inner.for.inc51: -// CHECK15-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK15-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD52:%.*]] = add nsw i32 [[TMP30]], 1 -// CHECK15-NEXT: store i32 [[ADD52]], ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP14]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD52]], ptr [[DOTOMP_IV39]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND41]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK15: omp.inner.for.end53: // CHECK15-NEXT: store i32 10, ptr [[I40]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB55]], align 4 // CHECK15-NEXT: store i32 9, ptr 
[[DOTOMP_UB56]], align 4 -// CHECK15-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_LB55]], align 4 +// CHECK15-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_LB55]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP31]], ptr [[DOTOMP_IV57]], align 4 // CHECK15-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], ptr [[B]], i32 0, i32 0 // CHECK15-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[ARRAYDECAY]], i32 16) ] // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND59:%.*]] // CHECK15: omp.inner.for.cond59: -// CHECK15-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP17:![0-9]+]] -// CHECK15-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_UB56]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_UB56]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP60:%.*]] = icmp sle i32 [[TMP32]], [[TMP33]] // CHECK15-NEXT: br i1 [[CMP60]], label [[OMP_INNER_FOR_BODY61:%.*]], label [[OMP_INNER_FOR_END85:%.*]] // CHECK15: omp.inner.for.body61: -// CHECK15-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL62:%.*]] = mul nsw i32 [[TMP34]], 1 // CHECK15-NEXT: [[ADD63:%.*]] = add nsw i32 0, [[MUL62]] -// CHECK15-NEXT: store i32 [[ADD63]], ptr [[I58]], align 4, !llvm.access.group [[ACC_GRP17]] -// CHECK15-NEXT: [[TMP35:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: store i32 [[ADD63]], ptr [[I58]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK15-NEXT: [[TMP35:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD64:%.*]] = add nsw i32 [[TMP35]], 1 -// CHECK15-NEXT: store i32 [[ADD64]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: store i32 [[ADD64]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[B]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP36:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: [[TMP36:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV65:%.*]] = fpext float [[TMP36]] to double // CHECK15-NEXT: [[ADD66:%.*]] = fadd double [[CONV65]], 1.000000e+00 // CHECK15-NEXT: [[CONV67:%.*]] = fptrunc double [[ADD66]] to float -// CHECK15-NEXT: store float [[CONV67]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: store float [[CONV67]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK15-NEXT: [[ARRAYIDX68:%.*]] = getelementptr inbounds float, ptr [[VLA]], i32 3 -// CHECK15-NEXT: [[TMP37:%.*]] = load float, ptr [[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: [[TMP37:%.*]] = load float, ptr [[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV69:%.*]] = fpext float [[TMP37]] to double // CHECK15-NEXT: [[ADD70:%.*]] = fadd double [[CONV69]], 1.000000e+00 // CHECK15-NEXT: [[CONV71:%.*]] = fptrunc double [[ADD70]] to float -// CHECK15-NEXT: store float [[CONV71]], ptr 
[[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: store float [[CONV71]], ptr [[ARRAYIDX68]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK15-NEXT: [[ARRAYIDX72:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[C]], i32 0, i32 1 // CHECK15-NEXT: [[ARRAYIDX73:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX72]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP38:%.*]] = load double, ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: [[TMP38:%.*]] = load double, ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD74:%.*]] = fadd double [[TMP38]], 1.000000e+00 -// CHECK15-NEXT: store double [[ADD74]], ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: store double [[ADD74]], ptr [[ARRAYIDX73]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK15-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK15-NEXT: [[ARRAYIDX75:%.*]] = getelementptr inbounds double, ptr [[VLA1]], i32 [[TMP39]] // CHECK15-NEXT: [[ARRAYIDX76:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX75]], i32 3 -// CHECK15-NEXT: [[TMP40:%.*]] = load double, ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: [[TMP40:%.*]] = load double, ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD77:%.*]] = fadd double [[TMP40]], 1.000000e+00 -// CHECK15-NEXT: store double [[ADD77]], ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: store double [[ADD77]], ptr [[ARRAYIDX76]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK15-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP41:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: [[TMP41:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD78:%.*]] = add nsw i64 [[TMP41]], 1 -// CHECK15-NEXT: store i64 [[ADD78]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: store i64 [[ADD78]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK15-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[D]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP42:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: [[TMP42:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV79:%.*]] = sext i8 [[TMP42]] to i32 // CHECK15-NEXT: [[ADD80:%.*]] = add nsw i32 [[CONV79]], 1 // CHECK15-NEXT: [[CONV81:%.*]] = trunc i32 [[ADD80]] to i8 -// CHECK15-NEXT: store i8 [[CONV81]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: store i8 [[CONV81]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE82:%.*]] // CHECK15: omp.body.continue82: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC83:%.*]] // CHECK15: omp.inner.for.inc83: -// CHECK15-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP17]] +// CHECK15-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD84:%.*]] = add nsw i32 [[TMP43]], 1 -// CHECK15-NEXT: store i32 [[ADD84]], ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP17]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP18:![0-9]+]] +// CHECK15-NEXT: 
store i32 [[ADD84]], ptr [[DOTOMP_IV57]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND59]], !llvm.loop [[LOOP19:![0-9]+]] // CHECK15: omp.inner.for.end85: // CHECK15-NEXT: store i32 10, ptr [[I58]], align 4 -// CHECK15-NEXT: [[TMP44:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP44:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP45:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP45]]) // CHECK15-NEXT: ret i32 [[TMP44]] @@ -9116,27 +9116,27 @@ // CHECK15-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK15-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK15-NEXT: store i32 0, ptr [[A]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK15-NEXT: store i32 [[ADD]], ptr [[A]], align 4 -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK15-NEXT: store i32 [[ADD2]], ptr [[A]], align 4 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK15-NEXT: store i32 [[ADD4]], ptr [[A]], align 4 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK15-NEXT: store i32 [[ADD6]], ptr [[A]], align 4 -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: ret i32 [[TMP8]] // // @@ -9157,81 +9157,81 @@ // CHECK15-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK15-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK15-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK15-NEXT: store i32 [[ADD]], ptr [[B]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] 
= load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK15-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK15-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 // CHECK15-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR0]], align 4 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60 // CHECK15-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 // CHECK15-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK15-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK15-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1 // CHECK15-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK15: omp_if.then: // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]] -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK15-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK15-NEXT: [[ADD3:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD3]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK15-NEXT: store i32 [[ADD3]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP10]] to double // CHECK15-NEXT: [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK15-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK15-NEXT: store double [[ADD4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK15-NEXT: store double [[ADD4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK15-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP11:%.*]] = load double, ptr [[A5]], align 4, 
!llvm.access.group [[ACC_GRP20]] +// CHECK15-NEXT: [[TMP11:%.*]] = load double, ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00 -// CHECK15-NEXT: store double [[INC]], ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK15-NEXT: store double [[INC]], ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK15-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 // CHECK15-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP12]] // CHECK15-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK15-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP20]] +// CHECK15-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP21]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK15-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: br label [[OMP_IF_END:%.*]] // CHECK15: omp_if.else: // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK15: omp.inner.for.cond9: -// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK15-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK15-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END25:%.*]] // CHECK15: omp.inner.for.body11: -// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP16]], 1 // CHECK15-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]] // CHECK15-NEXT: store i32 [[ADD13]], ptr [[I]], align 4 -// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[B]], align 4 +// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV14:%.*]] = sitofp i32 [[TMP17]] to double // CHECK15-NEXT: [[ADD15:%.*]] = fadd double [[CONV14]], 1.500000e+00 // CHECK15-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 // CHECK15-NEXT: store double [[ADD15]], ptr [[A16]], align 4 // CHECK15-NEXT: [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[THIS1]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP18:%.*]] = load double, ptr [[A17]], align 4 +// CHECK15-NEXT: [[TMP18:%.*]] = load double, ptr [[A17]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[INC18:%.*]] = fadd double [[TMP18]], 
1.000000e+00 // CHECK15-NEXT: store double [[INC18]], ptr [[A17]], align 4 // CHECK15-NEXT: [[CONV19:%.*]] = fptosi double [[INC18]] to i16 @@ -9243,10 +9243,10 @@ // CHECK15: omp.body.continue22: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC23:%.*]] // CHECK15: omp.inner.for.inc23: -// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP20]], 1 // CHECK15-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP23:![0-9]+]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP24:![0-9]+]] // CHECK15: omp.inner.for.end25: // CHECK15-NEXT: br label [[OMP_IF_END]] // CHECK15: omp_if.end: @@ -9254,9 +9254,9 @@ // CHECK15-NEXT: [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK15-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i32 [[TMP21]] // CHECK15-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX26]], i32 1 -// CHECK15-NEXT: [[TMP22:%.*]] = load i16, ptr [[ARRAYIDX27]], align 2 +// CHECK15-NEXT: [[TMP22:%.*]] = load i16, ptr [[ARRAYIDX27]], align 2, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV28:%.*]] = sext i16 [[TMP22]] to i32 -// CHECK15-NEXT: [[TMP23:%.*]] = load i32, ptr [[B]], align 4 +// CHECK15-NEXT: [[TMP23:%.*]] = load i32, ptr [[B]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD29:%.*]] = add nsw i32 [[CONV28]], [[TMP23]] // CHECK15-NEXT: [[TMP24:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP24]]) @@ -9284,12 +9284,12 @@ // CHECK15-NEXT: store i32 0, ptr [[A]], align 4 // CHECK15-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK15-NEXT: store i8 0, ptr [[AAA]], align 1 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB:%.*]] = sub i32 [[TMP2]], [[TMP3]] // CHECK15-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 @@ -9297,59 +9297,59 @@ // CHECK15-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK15-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // CHECK15-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK15: simd.if.then: -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]] -// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD6:%.*]] = add i32 [[TMP10]], 1 // CHECK15-NEXT: [[CMP7:%.*]] = icmp ult i32 [[TMP9]], [[ADD6]] // CHECK15-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL:%.*]] = mul i32 [[TMP12]], 1 // CHECK15-NEXT: [[ADD8:%.*]] = add i32 [[TMP11]], [[MUL]] -// CHECK15-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: store i32 [[ADD8]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK15-NEXT: store i32 [[ADD9]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK15-NEXT: [[TMP14:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: store i32 [[ADD9]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK15-NEXT: [[TMP14:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV:%.*]] = sext i16 [[TMP14]] to i32 // CHECK15-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV]], 1 // CHECK15-NEXT: [[CONV11:%.*]] = trunc i32 [[ADD10]] to i16 -// CHECK15-NEXT: store i16 [[CONV11]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP25]] -// CHECK15-NEXT: [[TMP15:%.*]] = load i8, ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: store i16 [[CONV11]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP26]] +// CHECK15-NEXT: [[TMP15:%.*]] = load i8, ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP26]], !noundef 
[[NOUNDEF3]] // CHECK15-NEXT: [[CONV12:%.*]] = sext i8 [[TMP15]] to i32 // CHECK15-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1 // CHECK15-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i8 -// CHECK15-NEXT: store i8 [[CONV14]], ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: store i8 [[CONV14]], ptr [[AAA]], align 1, !llvm.access.group [[ACC_GRP26]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[B]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK15-NEXT: store i32 [[ADD15]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: store i32 [[ADD15]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK15-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD16:%.*]] = add i32 [[TMP17]], 1 -// CHECK15-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] // CHECK15: omp.inner.for.end: -// CHECK15-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB17:%.*]] = sub i32 [[TMP19]], [[TMP20]] // CHECK15-NEXT: [[SUB18:%.*]] = sub i32 [[SUB17]], 1 // CHECK15-NEXT: [[ADD19:%.*]] = add i32 [[SUB18]], 1 @@ -9359,7 +9359,7 @@ // CHECK15-NEXT: store i32 [[ADD22]], ptr [[I5]], align 4 // CHECK15-NEXT: br label [[SIMD_IF_END]] // CHECK15: simd.if.end: -// CHECK15-NEXT: [[TMP21:%.*]] = load i32, ptr [[A]], align 4 +// CHECK15-NEXT: [[TMP21:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: ret i32 [[TMP21]] // // @@ -9380,42 +9380,42 @@ // CHECK15-NEXT: store i16 0, ptr [[AA]], align 2 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]] -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group 
[[ACC_GRP28]] +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK15-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK15-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP28]] +// CHECK15-NEXT: store i32 [[ADD1]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK15-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 // CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK15-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK15-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP28]] +// CHECK15-NEXT: store i16 [[CONV3]], ptr [[AA]], align 2, !llvm.access.group [[ACC_GRP29]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[B]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK15-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK15-NEXT: store i32 [[ADD4]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP7]], 1 -// CHECK15-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 10, ptr [[I]], align 4 -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 +// 
CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: ret i32 [[TMP8]] // // @@ -9430,12 +9430,12 @@ // CHECK17-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11:![0-9]+]] +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK17-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., i64 [[TMP4]]) // CHECK17-NEXT: ret void // @@ -9461,45 +9461,45 @@ // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]] -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef 
[[NOUNDEF11]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK17-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK17-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -9515,9 +9515,9 @@ // CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..1, i64 [[TMP1]]) // CHECK17-NEXT: ret void // @@ -9543,50 +9543,50 @@ // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK17-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP18]] +// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK17-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK17-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK17-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP18]] +// CHECK17-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP19]] 
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK17-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK17-NEXT: br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -9605,12 +9605,12 @@ // CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK17-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK17-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i64 [[TMP1]], i64 [[TMP3]]) // CHECK17-NEXT: ret void // @@ -9638,53 +9638,53 @@ // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]] -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK17-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK17-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP21]] +// CHECK17-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP22]] +// 
CHECK17-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32 // CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK17-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK17-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP21]] +// CHECK17-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP22]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK17-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK17-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -9717,16 +9717,16 @@ // CHECK17-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK17-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK17-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 9, ptr @.omp_outlined..3, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK17-NEXT: ret void // @@ -9764,11 +9764,11 @@ // CHECK17-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK17-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK17-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK17-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 0 @@ -9778,81 +9778,81 @@ // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4 +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]] -// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK17-NEXT: br i1 [[CMP5]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK17-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK17-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV:%.*]] = fpext float [[TMP17]] to double // CHECK17-NEXT: [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK17-NEXT: [[CONV8:%.*]] = fptrunc double [[ADD7]] to float -// CHECK17-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK17-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK17-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV10:%.*]] = fpext float [[TMP18]] to double // CHECK17-NEXT: [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00 // CHECK17-NEXT: [[CONV12:%.*]] = fptrunc double [[ADD11]] to float -// CHECK17-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK17-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK17-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX13]], i64 0, i64 2 -// CHECK17-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00 -// CHECK17-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK17-NEXT: [[TMP20:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK17-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP20]] // CHECK17-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX16]], i64 3 -// 
CHECK17-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00 -// CHECK17-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK17-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1 -// CHECK17-NEXT: store i64 [[ADD19]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: store i64 [[ADD19]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK17-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV20:%.*]] = sext i8 [[TMP23]] to i32 // CHECK17-NEXT: [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1 // CHECK17-NEXT: [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8 -// CHECK17-NEXT: store i8 [[CONV22]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: store i8 [[CONV22]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK17-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP9]]) -// CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK17-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -9880,18 +9880,18 @@ // CHECK17-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK17-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// 
CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4 -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i16 [[TMP5]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK17-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK17-NEXT: [[TMP6:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i8 [[TMP7]], ptr [[AAA_CASTED]], align 1 -// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK17-NEXT: [[TMP8:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..4, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], ptr [[TMP0]]) // CHECK17-NEXT: ret void // @@ -9925,99 +9925,99 @@ // CHECK17-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK17-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]] // CHECK17-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK17-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK17-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK17-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK17-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // 
CHECK17-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK17: omp.precond.then: // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP10]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP6:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]] // CHECK17-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]] -// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD7:%.*]] = add i32 [[TMP17]], 1 // CHECK17-NEXT: [[CMP8:%.*]] = icmp ult i32 [[TMP16]], [[ADD7]] // CHECK17-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK17-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK17-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: 
[[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL:%.*]] = mul i32 [[TMP19]], 1 // CHECK17-NEXT: [[ADD9:%.*]] = add i32 [[TMP18]], [[MUL]] -// CHECK17-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK17-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK17-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1 -// CHECK17-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK17-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP27]] +// CHECK17-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK17-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV:%.*]] = sext i16 [[TMP21]] to i32 // CHECK17-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV]], 1 // CHECK17-NEXT: [[CONV12:%.*]] = trunc i32 [[ADD11]] to i16 -// CHECK17-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP27]] -// CHECK17-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP27]] +// CHECK17-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP28]] +// CHECK17-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV13:%.*]] = sext i8 [[TMP22]] to i32 // CHECK17-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV13]], 1 // CHECK17-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 -// CHECK17-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP27]] +// CHECK17-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP28]] // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK17-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK17-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK17-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP28]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK17-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD17:%.*]] = add i32 [[TMP24]], 1 -// CHECK17-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label 
[[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK17-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK17-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: -// CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK17-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK17-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[SUB18:%.*]] = sub i32 [[TMP30]], [[TMP31]] // CHECK17-NEXT: [[SUB19:%.*]] = sub i32 [[SUB18]], 1 // CHECK17-NEXT: [[ADD20:%.*]] = add i32 [[SUB19]], 1 @@ -10046,13 +10046,13 @@ // CHECK17-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK17-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 +// CHECK17-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..5, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]]) // CHECK17-NEXT: ret void // @@ -10081,68 +10081,68 @@ // CHECK17-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK17-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK17-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]] -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK17-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group 
[[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK17-NEXT: [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK17-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK17-NEXT: store double [[ADD4]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK17-NEXT: store double [[ADD4]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP31]] // CHECK17-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK17-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 -// CHECK17-NEXT: store double [[INC]], ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK17-NEXT: store double [[INC]], ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP31]] // CHECK17-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 // CHECK17-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i64 [[TMP14]] // CHECK17-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK17-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP30]] +// CHECK17-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP31]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK17-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK17-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK17-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -10164,12 +10164,12 @@ // CHECK17-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK17-NEXT: store ptr [[B]], ptr [[B_ADDR]], 
align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK17-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK17-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK17-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK17-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..6, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK17-NEXT: ret void // @@ -10200,57 +10200,57 @@ // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]] -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group 
[[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK17-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK17-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK17-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] +// CHECK17-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK17-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK17-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP33]] +// CHECK17-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]] // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK17-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK17-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]], !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK17-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK17-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK17-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -10271,12 +10271,12 @@ // CHECK19-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 
[[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF12:![0-9]+]] +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK19-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., i32 [[TMP4]]) // CHECK19-NEXT: ret void // @@ -10302,45 +10302,45 @@ // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// 
CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK19-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK19-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -10356,9 +10356,9 @@ // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..1, i32 [[TMP1]]) // CHECK19-NEXT: ret void // @@ -10384,50 +10384,50 @@ // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK19-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP19]] +// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK19-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK19-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK19-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP19]] +// CHECK19-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group 
[[ACC_GRP20]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK19-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK19-NEXT: br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -10446,12 +10446,12 @@ // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK19-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i32 [[TMP1]], i32 [[TMP3]]) // CHECK19-NEXT: ret void // @@ -10479,53 +10479,53 @@ // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]] -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK19-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK19-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP22]] +// CHECK19-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP23]] +// 
CHECK19-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32 // CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK19-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK19-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP22]] +// CHECK19-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP23]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK19-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK19-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -10558,16 +10558,16 @@ // CHECK19-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK19-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 9, ptr @.omp_outlined..3, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK19-NEXT: ret void // @@ -10605,11 +10605,11 @@ // CHECK19-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK19-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK19-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK19-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 0 @@ -10619,81 +10619,81 @@ // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4 +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]] -// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK19-NEXT: br i1 [[CMP5]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK19-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV:%.*]] = fpext float [[TMP17]] to double // CHECK19-NEXT: [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK19-NEXT: [[CONV8:%.*]] = fptrunc double [[ADD7]] to float -// CHECK19-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK19-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK19-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV10:%.*]] = fpext float [[TMP18]] to double // CHECK19-NEXT: [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00 // CHECK19-NEXT: [[CONV12:%.*]] = fptrunc double [[ADD11]] to float -// CHECK19-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK19-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK19-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX13]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00 -// CHECK19-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK19-NEXT: [[TMP20:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK19-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP20]] // CHECK19-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX16]], i32 3 -// 
CHECK19-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00 -// CHECK19-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK19-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1 -// CHECK19-NEXT: store i64 [[ADD19]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: store i64 [[ADD19]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK19-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV20:%.*]] = sext i8 [[TMP23]] to i32 // CHECK19-NEXT: [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1 // CHECK19-NEXT: [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8 -// CHECK19-NEXT: store i8 [[CONV22]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: store i8 [[CONV22]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK19-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP9]]) -// CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK19-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -10721,18 +10721,18 @@ // CHECK19-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK19-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// 
CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i16 [[TMP5]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK19-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i8 [[TMP7]], ptr [[AAA_CASTED]], align 1 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..4, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], ptr [[TMP0]]) // CHECK19-NEXT: ret void // @@ -10766,99 +10766,99 @@ // CHECK19-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK19-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]] // CHECK19-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK19-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK19-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK19-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK19-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // 
CHECK19-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK19: omp.precond.then: // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP10]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP6:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]] // CHECK19-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]] -// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD7:%.*]] = add i32 [[TMP17]], 1 // CHECK19-NEXT: [[CMP8:%.*]] = icmp ult i32 [[TMP16]], [[ADD7]] // CHECK19-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK19-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK19-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: 
[[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL:%.*]] = mul i32 [[TMP19]], 1 // CHECK19-NEXT: [[ADD9:%.*]] = add i32 [[TMP18]], [[MUL]] -// CHECK19-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK19-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK19-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1 -// CHECK19-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK19-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP28]] +// CHECK19-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK19-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP21]] to i32 // CHECK19-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV]], 1 // CHECK19-NEXT: [[CONV12:%.*]] = trunc i32 [[ADD11]] to i16 -// CHECK19-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP28]] -// CHECK19-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP28]] +// CHECK19-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP29]] +// CHECK19-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV13:%.*]] = sext i8 [[TMP22]] to i32 // CHECK19-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV13]], 1 // CHECK19-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 -// CHECK19-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP28]] +// CHECK19-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP29]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK19-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK19-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK19-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK19-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD17:%.*]] = add i32 [[TMP24]], 1 -// CHECK19-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label 
[[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK19-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK19-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: -// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK19-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK19-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[SUB18:%.*]] = sub i32 [[TMP30]], [[TMP31]] // CHECK19-NEXT: [[SUB19:%.*]] = sub i32 [[SUB18]], 1 // CHECK19-NEXT: [[ADD20:%.*]] = add i32 [[SUB19]], 1 @@ -10887,13 +10887,13 @@ // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK19-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..5, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]]) // CHECK19-NEXT: ret void // @@ -10922,68 +10922,68 @@ // CHECK19-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK19-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK19-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]] -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK19-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group 
[[ACC_GRP32]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP32]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK19-NEXT: [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK19-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK19-NEXT: store double [[ADD4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK19-NEXT: store double [[ADD4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK19-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK19-NEXT: [[TMP13:%.*]] = load double, ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP32]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 -// CHECK19-NEXT: store double [[INC]], ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK19-NEXT: store double [[INC]], ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK19-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 // CHECK19-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i32 [[TMP14]] // CHECK19-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK19-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP31]] +// CHECK19-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP32]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK19-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK19-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK19-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -11005,12 +11005,12 @@ // CHECK19-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK19-NEXT: store ptr [[B]], ptr [[B_ADDR]], 
align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK19-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK19-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..6, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK19-NEXT: ret void // @@ -11041,57 +11041,57 @@ // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34:![0-9]+]] -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group 
[[ACC_GRP35]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK19-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK19-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK19-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]] +// CHECK19-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK19-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK19-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP34]] +// CHECK19-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP35]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK19-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK19-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]], !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK19-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP34]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP35:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK19-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK19-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -11112,12 +11112,12 @@ // CHECK21-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK21-NEXT: store i64 
[[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF11:![0-9]+]] +// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK21-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK21-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., i64 [[TMP4]]) // CHECK21-NEXT: ret void // @@ -11143,45 +11143,45 @@ // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]] -// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// 
CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK21-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK21-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -11197,9 +11197,9 @@ // CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK21-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK21-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..1, i64 [[TMP1]]) // CHECK21-NEXT: ret void // @@ -11225,50 +11225,50 @@ // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]] -// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK21-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP18]] +// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK21-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 // CHECK21-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK21-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK21-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP18]] +// CHECK21-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group 
[[ACC_GRP19]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK21-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK21-NEXT: br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -11287,12 +11287,12 @@ // CHECK21-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8 // CHECK21-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 -// CHECK21-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK21-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK21-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK21-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK21-NEXT: [[TMP3:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i64 [[TMP1]], i64 [[TMP3]]) // CHECK21-NEXT: ret void // @@ -11320,53 +11320,53 @@ // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]] -// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK21-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK21-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP21]] +// CHECK21-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP22]] +// 
CHECK21-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32 // CHECK21-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK21-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK21-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP21]] +// CHECK21-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP22]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] +// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK21-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK21-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -11399,16 +11399,16 @@ // CHECK21-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK21-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK21-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK21-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK21-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8 +// CHECK21-NEXT: [[TMP9:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 9, ptr @.omp_outlined..3, i64 [[TMP9]], ptr [[TMP0]], i64 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK21-NEXT: ret void // @@ -11446,11 +11446,11 @@ // CHECK21-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 8 // CHECK21-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8 // CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 8 // CHECK21-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 -// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8 +// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[VLA_ADDR4]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 8 // CHECK21-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK21-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 0 @@ -11460,81 +11460,81 @@ // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4 +// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]] -// CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK21-NEXT: br i1 [[CMP5]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK21-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i64 0, i64 2 -// CHECK21-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV:%.*]] = fpext float [[TMP17]] to double // CHECK21-NEXT: [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK21-NEXT: [[CONV8:%.*]] = fptrunc double [[ADD7]] to float -// CHECK21-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK21-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i64 3 -// CHECK21-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV10:%.*]] = fpext float [[TMP18]] to double // CHECK21-NEXT: [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00 // CHECK21-NEXT: [[CONV12:%.*]] = fptrunc double [[ADD11]] to float -// CHECK21-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK21-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i64 0, i64 1 // CHECK21-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX13]], i64 0, i64 2 -// CHECK21-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00 -// CHECK21-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK21-NEXT: [[TMP20:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK21-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP20]] // CHECK21-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX16]], i64 3 -// 
CHECK21-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00 -// CHECK21-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK21-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK21-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1 -// CHECK21-NEXT: store i64 [[ADD19]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: store i64 [[ADD19]], ptr [[X]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK21-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK21-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV20:%.*]] = sext i8 [[TMP23]] to i32 // CHECK21-NEXT: [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1 // CHECK21-NEXT: [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8 -// CHECK21-NEXT: store i8 [[CONV22]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: store i8 [[CONV22]], ptr [[Y]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] +// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK21-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP9]]) -// CHECK21-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK21-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -11562,18 +11562,18 @@ // CHECK21-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK21-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// 
CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4 -// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK21-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i16 [[TMP5]], ptr [[AA_CASTED]], align 2 -// CHECK21-NEXT: [[TMP6:%.*]] = load i64, ptr [[AA_CASTED]], align 8 -// CHECK21-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK21-NEXT: [[TMP6:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i8 [[TMP7]], ptr [[AAA_CASTED]], align 1 -// CHECK21-NEXT: [[TMP8:%.*]] = load i64, ptr [[AAA_CASTED]], align 8 +// CHECK21-NEXT: [[TMP8:%.*]] = load i64, ptr [[AAA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..4, i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], ptr [[TMP0]]) // CHECK21-NEXT: ret void // @@ -11607,99 +11607,99 @@ // CHECK21-NEXT: store i64 [[AAA]], ptr [[AAA_ADDR]], align 8 // CHECK21-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]] // CHECK21-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK21-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK21-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK21-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK21-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // 
CHECK21-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK21: omp.precond.then: // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4 // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP10]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP6:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]] // CHECK21-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: -// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]] -// CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD7:%.*]] = add i32 [[TMP17]], 1 // CHECK21-NEXT: [[CMP8:%.*]] = icmp ult i32 [[TMP16]], [[ADD7]] // CHECK21-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: 
[[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL:%.*]] = mul i32 [[TMP19]], 1 // CHECK21-NEXT: [[ADD9:%.*]] = add i32 [[TMP18]], [[MUL]] -// CHECK21-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK21-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1 -// CHECK21-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK21-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP27]] +// CHECK21-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK21-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV:%.*]] = sext i16 [[TMP21]] to i32 // CHECK21-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV]], 1 // CHECK21-NEXT: [[CONV12:%.*]] = trunc i32 [[ADD11]] to i16 -// CHECK21-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP27]] -// CHECK21-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP27]] +// CHECK21-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP28]] +// CHECK21-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV13:%.*]] = sext i8 [[TMP22]] to i32 // CHECK21-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV13]], 1 // CHECK21-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 -// CHECK21-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP27]] +// CHECK21-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP28]] // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK21-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK21-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP28]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] +// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD17:%.*]] = add i32 [[TMP24]], 1 -// CHECK21-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label 
[[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK21-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK21-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK21-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: -// CHECK21-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK21-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK21-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK21-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[SUB18:%.*]] = sub i32 [[TMP30]], [[TMP31]] // CHECK21-NEXT: [[SUB19:%.*]] = sub i32 [[SUB18]], 1 // CHECK21-NEXT: [[ADD20:%.*]] = add i32 [[SUB19]], 1 @@ -11731,18 +11731,18 @@ // CHECK21-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK21-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 -// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8 -// CHECK21-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK21-NEXT: [[TMP5:%.*]] = load i64, ptr [[B_CASTED]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1 // CHECK21-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK21-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK21-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK21-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 6, ptr @.omp_outlined..5, ptr [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], ptr [[TMP3]], i64 [[TMP7]]) // CHECK21-NEXT: ret void // @@ -11773,88 +11773,88 @@ // CHECK21-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK21-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8 // CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 8 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK21-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK21-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK21-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK21: omp_if.then: // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30:![0-9]+]] -// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK21-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef 
[[NOUNDEF11]] // CHECK21-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]] // CHECK21-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP13]] to double // CHECK21-NEXT: [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK21-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK21-NEXT: store double [[ADD4]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK21-NEXT: store double [[ADD4]], ptr [[A]], align 8, !llvm.access.group [[ACC_GRP31]] // CHECK21-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK21-NEXT: [[TMP14:%.*]] = load double, ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK21-NEXT: [[TMP14:%.*]] = load double, ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00 -// CHECK21-NEXT: store double [[INC]], ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP30]] +// CHECK21-NEXT: store double [[INC]], ptr [[A5]], align 8, !llvm.access.group [[ACC_GRP31]] // CHECK21-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 // CHECK21-NEXT: [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i64 [[TMP15]] // CHECK21-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i64 1 -// CHECK21-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP30]] +// CHECK21-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP31]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] +// CHECK21-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK21-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP30]] -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_IF_END:%.*]] // CHECK21: omp_if.else: // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK21: omp.inner.for.cond9: -// CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: 
[[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] // CHECK21-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END25:%.*]] // CHECK21: omp.inner.for.body11: -// CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP19]], 1 // CHECK21-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]] // CHECK21-NEXT: store i32 [[ADD13]], ptr [[I]], align 4 -// CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK21-NEXT: [[TMP20:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV14:%.*]] = sitofp i32 [[TMP20]] to double // CHECK21-NEXT: [[ADD15:%.*]] = fadd double [[CONV14]], 1.500000e+00 // CHECK21-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 // CHECK21-NEXT: store double [[ADD15]], ptr [[A16]], align 8 // CHECK21-NEXT: [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK21-NEXT: [[TMP21:%.*]] = load double, ptr [[A17]], align 8 +// CHECK21-NEXT: [[TMP21:%.*]] = load double, ptr [[A17]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[INC18:%.*]] = fadd double [[TMP21]], 1.000000e+00 // CHECK21-NEXT: store double [[INC18]], ptr [[A17]], align 8 // CHECK21-NEXT: [[CONV19:%.*]] = fptosi double [[INC18]] to i16 @@ -11866,17 +11866,17 @@ // CHECK21: omp.body.continue22: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC23:%.*]] // CHECK21: omp.inner.for.inc23: -// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK21-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK21-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP33:![0-9]+]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP34:![0-9]+]] // CHECK21: omp.inner.for.end25: // CHECK21-NEXT: br label [[OMP_IF_END]] // CHECK21: omp_if.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 // CHECK21-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -11898,12 +11898,12 @@ // CHECK21-NEXT: store i64 [[AA]], ptr [[AA_ADDR]], align 8 // CHECK21-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8 // CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8 -// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK21-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8 -// CHECK21-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK21-NEXT: [[TMP2:%.*]] = load i64, ptr [[A_CASTED]], align 8, !noundef 
[[NOUNDEF11]] +// CHECK21-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8 +// CHECK21-NEXT: [[TMP4:%.*]] = load i64, ptr [[AA_CASTED]], align 8, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..6, i64 [[TMP2]], i64 [[TMP4]], ptr [[TMP0]]) // CHECK21-NEXT: ret void // @@ -11934,57 +11934,57 @@ // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]] -// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK21-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]], !noundef [[NOUNDEF11]] +// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, 
!llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK21-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK21-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP35]] +// CHECK21-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK21-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK21-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK21-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK21-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP35]] +// CHECK21-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP36]] // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 2 -// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK21-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK21-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] +// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]], !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK21-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF11]] // CHECK21-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK21-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -12005,12 +12005,12 @@ // CHECK23-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4, !noundef [[NOUNDEF12:![0-9]+]] +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void 
@__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]]) -// CHECK23-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK23-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., i32 [[TMP4]]) // CHECK23-NEXT: ret void // @@ -12036,45 +12036,45 @@ // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: 
omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK23-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK23-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: @@ -12090,9 +12090,9 @@ // CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK23-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK23-NEXT: [[TMP0:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i16 [[TMP0]], ptr [[AA_CASTED]], align 2 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..1, i32 [[TMP1]]) // CHECK23-NEXT: ret void // @@ -12118,50 +12118,50 @@ // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]] -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK23-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP19]] +// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK23-NEXT: [[TMP8:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 // CHECK23-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK23-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 -// CHECK23-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP19]] +// CHECK23-NEXT: store i16 [[CONV3]], ptr [[AA_ADDR]], align 2, !llvm.access.group 
[[ACC_GRP20]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK23-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 // CHECK23-NEXT: br i1 [[TMP11]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: @@ -12180,12 +12180,12 @@ // CHECK23-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4 // CHECK23-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 -// CHECK23-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK23-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP2:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i16 [[TMP2]], ptr [[AA_CASTED]], align 2 -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined..2, i32 [[TMP1]], i32 [[TMP3]]) // CHECK23-NEXT: ret void // @@ -12213,53 +12213,53 @@ // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]] -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK23-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK23-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP22]] +// CHECK23-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP23]] +// 
CHECK23-NEXT: [[TMP9:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV:%.*]] = sext i16 [[TMP9]] to i32 // CHECK23-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK23-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK23-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP22]] +// CHECK23-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP23]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK23-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK23-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: @@ -12292,16 +12292,16 @@ // CHECK23-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK23-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK23-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK23-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP8]], ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4 +// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 9, ptr @.omp_outlined..3, i32 [[TMP9]], ptr [[TMP0]], i32 [[TMP1]], ptr [[TMP2]], ptr [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], ptr [[TMP6]], ptr [[TMP7]]) // CHECK23-NEXT: ret void // @@ -12339,11 +12339,11 @@ // CHECK23-NEXT: store ptr [[CN]], ptr [[CN_ADDR]], align 4 // CHECK23-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP2:%.*]] = load ptr, ptr [[BN_ADDR]], align 4 // CHECK23-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[VLA_ADDR4]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP6:%.*]] = load ptr, ptr [[CN_ADDR]], align 4 // CHECK23-NEXT: [[TMP7:%.*]] = load ptr, ptr [[D_ADDR]], align 4 // CHECK23-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 0 @@ -12353,81 +12353,81 @@ // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4 +// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP9]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP10]], 9 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]] -// CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK23-NEXT: br i1 [[CMP5]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK23-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: store i32 [[ADD6]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], ptr [[TMP0]], i32 0, i32 2 -// CHECK23-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV:%.*]] = fpext float [[TMP17]] to double // CHECK23-NEXT: [[ADD7:%.*]] = fadd double [[CONV]], 1.000000e+00 // CHECK23-NEXT: [[CONV8:%.*]] = fptrunc double [[ADD7]] to float -// CHECK23-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: store float [[CONV8]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK23-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr [[TMP2]], i32 3 -// CHECK23-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV10:%.*]] = fpext float [[TMP18]] to double // CHECK23-NEXT: [[ADD11:%.*]] = fadd double [[CONV10]], 1.000000e+00 // CHECK23-NEXT: [[CONV12:%.*]] = fptrunc double [[ADD11]] to float -// CHECK23-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: store float [[CONV12]], ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK23-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [5 x [10 x double]], ptr [[TMP3]], i32 0, i32 1 // CHECK23-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x double], ptr [[ARRAYIDX13]], i32 0, i32 2 -// CHECK23-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: [[TMP19:%.*]] = load double, ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD15:%.*]] = fadd double [[TMP19]], 1.000000e+00 -// CHECK23-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: store double [[ADD15]], ptr [[ARRAYIDX14]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK23-NEXT: [[TMP20:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK23-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i32 [[TMP20]] // CHECK23-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, ptr [[ARRAYIDX16]], i32 3 -// 
CHECK23-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: [[TMP21:%.*]] = load double, ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD18:%.*]] = fadd double [[TMP21]], 1.000000e+00 -// CHECK23-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: store double [[ADD18]], ptr [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK23-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK23-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: [[TMP22:%.*]] = load i64, ptr [[X]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD19:%.*]] = add nsw i64 [[TMP22]], 1 -// CHECK23-NEXT: store i64 [[ADD19]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: store i64 [[ADD19]], ptr [[X]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK23-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], ptr [[TMP7]], i32 0, i32 1 -// CHECK23-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: [[TMP23:%.*]] = load i8, ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV20:%.*]] = sext i8 [[TMP23]] to i32 // CHECK23-NEXT: [[ADD21:%.*]] = add nsw i32 [[CONV20]], 1 // CHECK23-NEXT: [[CONV22:%.*]] = trunc i32 [[ADD21]] to i8 -// CHECK23-NEXT: store i8 [[CONV22]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: store i8 [[CONV22]], ptr [[Y]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD23:%.*]] = add nsw i32 [[TMP24]], 1 -// CHECK23-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP9]]) -// CHECK23-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK23-NEXT: br i1 [[TMP26]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: @@ -12455,18 +12455,18 @@ // CHECK23-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK23-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// 
CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP5:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i16 [[TMP5]], ptr [[AA_CASTED]], align 2 -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[AA_CASTED]], align 4 -// CHECK23-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1 +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP7:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i8 [[TMP7]], ptr [[AAA_CASTED]], align 1 -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[AAA_CASTED]], align 4 +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[AAA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined..4, i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], ptr [[TMP0]]) // CHECK23-NEXT: ret void // @@ -12500,99 +12500,99 @@ // CHECK23-NEXT: store i32 [[AAA]], ptr [[AAA_ADDR]], align 4 // CHECK23-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[SUB:%.*]] = sub i32 [[TMP3]], [[TMP4]] // CHECK23-NEXT: [[SUB3:%.*]] = sub i32 [[SUB]], 1 // CHECK23-NEXT: [[ADD:%.*]] = add i32 [[SUB3]], 1 // CHECK23-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], 1 // CHECK23-NEXT: [[SUB4:%.*]] = sub i32 [[DIV]], 1 // CHECK23-NEXT: store i32 [[SUB4]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP5]], ptr [[I]], align 4 -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP6]], [[TMP7]] // 
CHECK23-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK23: omp.precond.then: // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_UB]], align 4 // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4u(ptr @[[GLOB1]], i32 [[TMP10]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP6:%.*]] = icmp ugt i32 [[TMP11]], [[TMP12]] // CHECK23-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: -// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE]] ], [ [[TMP14]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_IV]], align 4 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]] -// CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD7:%.*]] = add i32 [[TMP17]], 1 // CHECK23-NEXT: [[CMP8:%.*]] = icmp ult i32 [[TMP16]], [[ADD7]] // CHECK23-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: 
[[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL:%.*]] = mul i32 [[TMP19]], 1 // CHECK23-NEXT: [[ADD9:%.*]] = add i32 [[TMP18]], [[MUL]] -// CHECK23-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK23-NEXT: store i32 [[ADD9]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], 1 -// CHECK23-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK23-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP28]] +// CHECK23-NEXT: store i32 [[ADD10]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK23-NEXT: [[TMP21:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV:%.*]] = sext i16 [[TMP21]] to i32 // CHECK23-NEXT: [[ADD11:%.*]] = add nsw i32 [[CONV]], 1 // CHECK23-NEXT: [[CONV12:%.*]] = trunc i32 [[ADD11]] to i16 -// CHECK23-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP28]] -// CHECK23-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP28]] +// CHECK23-NEXT: store i16 [[CONV12]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP29]] +// CHECK23-NEXT: [[TMP22:%.*]] = load i8, ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV13:%.*]] = sext i8 [[TMP22]] to i32 // CHECK23-NEXT: [[ADD14:%.*]] = add nsw i32 [[CONV13]], 1 // CHECK23-NEXT: [[CONV15:%.*]] = trunc i32 [[ADD14]] to i8 -// CHECK23-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP28]] +// CHECK23-NEXT: store i8 [[CONV15]], ptr [[AAA_ADDR]], align 1, !llvm.access.group [[ACC_GRP29]] // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP23]], 1 -// CHECK23-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK23-NEXT: store i32 [[ADD16]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] +// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD17:%.*]] = add i32 [[TMP24]], 1 -// CHECK23-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]] -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP29]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label 
[[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK23-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK23-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK23-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: -// CHECK23-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK23-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK23-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK23-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[SUB18:%.*]] = sub i32 [[TMP30]], [[TMP31]] // CHECK23-NEXT: [[SUB19:%.*]] = sub i32 [[SUB18]], 1 // CHECK23-NEXT: [[ADD20:%.*]] = add i32 [[SUB19]], 1 @@ -12624,18 +12624,18 @@ // CHECK23-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK23-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP4]], ptr [[B_CASTED]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4 -// CHECK23-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_CASTED]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1 // CHECK23-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK23-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 6, ptr @.omp_outlined..5, ptr [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], ptr [[TMP3]], i32 [[TMP7]]) // CHECK23-NEXT: ret void // @@ -12666,88 +12666,88 @@ // CHECK23-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK23-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4 // CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP3:%.*]] = load ptr, ptr [[C_ADDR]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK23-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP5]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 9 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK23-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK23-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK23: omp_if.then: // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31:![0-9]+]] -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK23-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]], !noundef 
[[NOUNDEF12]] // CHECK23-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]] // CHECK23-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP32]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP13]] to double // CHECK23-NEXT: [[ADD4:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK23-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK23-NEXT: store double [[ADD4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK23-NEXT: store double [[ADD4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK23-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK23-NEXT: [[TMP14:%.*]] = load double, ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK23-NEXT: [[TMP14:%.*]] = load double, ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP32]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00 -// CHECK23-NEXT: store double [[INC]], ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK23-NEXT: store double [[INC]], ptr [[A5]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK23-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 // CHECK23-NEXT: [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[TMP3]], i32 [[TMP15]] // CHECK23-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, ptr [[ARRAYIDX]], i32 1 -// CHECK23-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP31]] +// CHECK23-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP32]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] +// CHECK23-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], 1 -// CHECK23-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP31]] -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_IF_END:%.*]] // CHECK23: omp_if.else: // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK23: omp.inner.for.cond9: -// CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: 
[[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] // CHECK23-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END25:%.*]] // CHECK23: omp.inner.for.body11: -// CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP19]], 1 // CHECK23-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]] // CHECK23-NEXT: store i32 [[ADD13]], ptr [[I]], align 4 -// CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[B_ADDR]], align 4 +// CHECK23-NEXT: [[TMP20:%.*]] = load i32, ptr [[B_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV14:%.*]] = sitofp i32 [[TMP20]] to double // CHECK23-NEXT: [[ADD15:%.*]] = fadd double [[CONV14]], 1.500000e+00 // CHECK23-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 // CHECK23-NEXT: store double [[ADD15]], ptr [[A16]], align 4 // CHECK23-NEXT: [[A17:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP0]], i32 0, i32 0 -// CHECK23-NEXT: [[TMP21:%.*]] = load double, ptr [[A17]], align 4 +// CHECK23-NEXT: [[TMP21:%.*]] = load double, ptr [[A17]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[INC18:%.*]] = fadd double [[TMP21]], 1.000000e+00 // CHECK23-NEXT: store double [[INC18]], ptr [[A17]], align 4 // CHECK23-NEXT: [[CONV19:%.*]] = fptosi double [[INC18]] to i16 @@ -12759,17 +12759,17 @@ // CHECK23: omp.body.continue22: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC23:%.*]] // CHECK23: omp.inner.for.inc23: -// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK23-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP23]], 1 // CHECK23-NEXT: store i32 [[ADD24]], ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP34:![0-9]+]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP35:![0-9]+]] // CHECK23: omp.inner.for.end25: // CHECK23-NEXT: br label [[OMP_IF_END]] // CHECK23: omp_if.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP5]]) -// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 // CHECK23-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: @@ -12791,12 +12791,12 @@ // CHECK23-NEXT: store i32 [[AA]], ptr [[AA_ADDR]], align 4 // CHECK23-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4 -// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP1]], ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4 -// CHECK23-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2 +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_CASTED]], align 4, !noundef 
[[NOUNDEF12]] +// CHECK23-NEXT: [[TMP3:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i16 [[TMP3]], ptr [[AA_CASTED]], align 2 -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[AA_CASTED]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined..6, i32 [[TMP2]], i32 [[TMP4]], ptr [[TMP0]]) // CHECK23-NEXT: ret void // @@ -12827,57 +12827,57 @@ // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]] -// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK23-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37:![0-9]+]], !noundef [[NOUNDEF12]] +// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR]], align 4, 
!llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK23-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK23-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP36]] +// CHECK23-NEXT: store i32 [[ADD2]], ptr [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK23-NEXT: [[TMP10:%.*]] = load i16, ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[CONV:%.*]] = sext i16 [[TMP10]] to i32 // CHECK23-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], 1 // CHECK23-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i16 -// CHECK23-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP36]] +// CHECK23-NEXT: store i16 [[CONV4]], ptr [[AA_ADDR]], align 2, !llvm.access.group [[ACC_GRP37]] // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 2 -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK23-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK23-NEXT: store i32 [[ADD5]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP37]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] +// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]], !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK23-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP37]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF12]] // CHECK23-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK23-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: diff --git a/clang/test/OpenMP/target_teams_distribute_simd_collapse_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_simd_collapse_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_simd_collapse_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_simd_collapse_codegen.cpp @@ -116,34 +116,34 @@ // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// 
CHECK1-NEXT: store ptr [[A]], ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[A]], ptr [[TMP1]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 56088, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 
+// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 56088, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK1-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28(ptr [[THIS1]]) #[[ATTR3:[0-9]+]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -151,8 +151,8 @@ // CHECK1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A3]], i64 0, i64 0 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4 -// CHECK1-NEXT: ret i32 [[TMP18]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF4:![0-9]+]] +// CHECK1-NEXT: ret i32 [[TMP16]] // // // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28 @@ -160,7 +160,7 @@ // CHECK1-NEXT: entry: // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -183,68 +183,68 @@ // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 56087 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 56087, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP8]], 456 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: store i32 
[[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP10]], 456 // CHECK1-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL4]] // CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK1-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP12]] to i64 // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM7]] -// CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK1-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -284,34 +284,34 @@ // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr [[A]], ptr [[TMP2]], align 4 -// 
CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr [[A]], ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 56088, ptr [[TMP15]], align 8 -// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP7]], 
align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 56088, ptr [[TMP13]], align 8 +// CHECK3-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK3-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28(ptr [[THIS1]]) #[[ATTR3:[0-9]+]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -319,8 +319,8 @@ // CHECK3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A3]], i32 0, i32 0 // CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4 -// CHECK3-NEXT: ret i32 [[TMP18]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF5:![0-9]+]] +// CHECK3-NEXT: ret i32 [[TMP16]] // // // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28 @@ -328,7 +328,7 @@ // CHECK3-NEXT: entry: // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -351,66 +351,66 @@ // CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 56087 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 56087, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP8]], 456 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: store i32 
[[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP10]], 456 // CHECK3-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL4]] // CHECK3-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK3-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], i32 0, i32 [[TMP11]] -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP12]] -// CHECK3-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK3-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -451,51 +451,51 @@ // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 456 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP5]], 456 // CHECK5-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456 // CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL4]] // CHECK5-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK5-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP7]] to i64 // CHECK5-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM7]] -// CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK5-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// 
CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 123, ptr [[I]], align 4 // CHECK5-NEXT: store i32 456, ptr [[J]], align 4 // CHECK5-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK5-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A10]], i64 0, i64 0 // CHECK5-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX11]], i64 0, i64 0 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX12]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX12]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: ret i32 [[TMP9]] // // @@ -522,49 +522,49 @@ // CHECK7-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 456 // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP5]], 456 // CHECK7-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456 // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL4]] // CHECK7-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK7-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr 
[[THIS1]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], i32 0, i32 [[TMP6]] -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP7]] -// CHECK7-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 123, ptr [[I]], align 4 // CHECK7-NEXT: store i32 456, ptr [[J]], align 4 // CHECK7-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK7-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A9]], i32 0, i32 0 // CHECK7-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX10]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX11]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX11]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: ret i32 [[TMP9]] // // @@ -586,18 +586,18 @@ // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x ptr], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK9-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK9-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to 
i64 // CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() // CHECK9-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 @@ -605,100 +605,100 @@ // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 // CHECK9-NEXT: store i64 [[TMP3]], ptr [[__VLA_EXPR1]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[N_CASTED]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[M]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP8]], ptr [[M_CASTED]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[M_CASTED]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[M_CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK9-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4 // CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes, i64 40, i1 false) -// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: store i64 [[TMP7]], ptr [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: store i64 [[TMP7]], ptr [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store i64 [[TMP7]], ptr [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store ptr null, ptr [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP16]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 // CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP18]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP20]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP25]], align 8 -// CHECK9-NEXT: 
[[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP19]], align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP22]], align 8 +// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK9-NEXT: store ptr null, ptr [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP24]], align 8 +// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP25]], align 8 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 +// CHECK9-NEXT: store i64 [[TMP11]], ptr [[TMP26]], align 8 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 // CHECK9-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP30]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP35]], align 8 -// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK9-NEXT: store i64 [[TMP11]], ptr [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP38]], align 8 -// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP42:%.*]] = load i32, ptr [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP42]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP43:%.*]] = load i32, ptr [[M]], align 4 -// CHECK9-NEXT: store i32 [[TMP43]], ptr [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[TMP44:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP44]], 0 +// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP31:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP32:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: store i32 [[TMP32]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP33]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 -// CHECK9-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP45]], 0 -// CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 -// CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] -// CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 -// CHECK9-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[TMP46:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP46]], 1 +// CHECK9-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 +// CHECK9-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP34]], 0 +// CHECK9-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 +// CHECK9-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] +// CHECK9-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 +// CHECK9-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK9-NEXT: [[TMP35:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP35]], 1 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 1, ptr [[TMP47]], align 4 -// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 5, ptr [[TMP48]], align 4 -// CHECK9-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP39]], ptr [[TMP49]], align 8 -// CHECK9-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP40]], ptr [[TMP50]], align 8 -// CHECK9-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr [[TMP41]], ptr [[TMP51]], align 8 -// CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP52]], align 8 -// CHECK9-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 
-// CHECK9-NEXT: store ptr null, ptr [[TMP53]], align 8 -// CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP54]], align 8 -// CHECK9-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 [[ADD]], ptr [[TMP55]], align 8 -// CHECK9-NEXT: [[TMP56:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP57:%.*]] = icmp ne i32 [[TMP56]], 0 -// CHECK9-NEXT: br i1 [[TMP57]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 1, ptr [[TMP36]], align 4 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 5, ptr [[TMP37]], align 4 +// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr [[TMP28]], ptr [[TMP38]], align 8 +// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: store ptr [[TMP29]], ptr [[TMP39]], align 8 +// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr [[TMP30]], ptr [[TMP40]], align 8 +// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP41]], align 8 +// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP42]], align 8 +// CHECK9-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP43]], align 8 +// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 [[ADD]], ptr [[TMP44]], align 8 +// CHECK9-NEXT: [[TMP45:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP46:%.*]] = icmp ne i32 [[TMP45]], 0 +// CHECK9-NEXT: br i1 [[TMP46]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], ptr [[VLA]]) #[[ATTR4:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP58:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 -// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP58]]) +// CHECK9-NEXT: [[TMP47:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP47]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP59:%.*]] = load ptr, ptr 
[[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP59]]) -// CHECK9-NEXT: [[TMP60:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP60]] +// CHECK9-NEXT: [[TMP48:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP48]]) +// CHECK9-NEXT: [[TMP49:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP49]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80 @@ -716,15 +716,15 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[M_ADDR]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[M_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[M_CASTED]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[M_CASTED]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = load i64, ptr [[M_CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined., i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]) // CHECK9-NEXT: ret void // @@ -741,18 +741,18 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_6:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_5:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[J:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[I13:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[J14:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[I11:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[J12:%.*]] = alloca i32, align 4 // CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK9-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 @@ -760,132 +760,132 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR]], align 4 -// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 -// CHECK9-NEXT: [[CONV7:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK9-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP6]], 0 -// CHECK9-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1 -// CHECK9-NEXT: [[CONV10:%.*]] = sext i32 [[DIV9]] to i64 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV7]], [[CONV10]] -// CHECK9-NEXT: [[SUB11:%.*]] = sub nsw i64 [[MUL]], 1 -// CHECK9-NEXT: store i64 [[SUB11]], ptr [[DOTCAPTURE_EXPR_6]], align 8 +// CHECK9-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, 
!noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP6]], 0 +// CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 +// CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV8]] +// CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 +// CHECK9-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_5]], align 8 // CHECK9-NEXT: store i32 0, ptr [[I]], align 4 // CHECK9-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP7]] // CHECK9-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK9: land.lhs.true: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK9-NEXT: [[CMP12:%.*]] = icmp slt i32 0, [[TMP8]] -// CHECK9-NEXT: br i1 [[CMP12]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[CMP10:%.*]] = icmp slt i32 0, [[TMP8]] +// CHECK9-NEXT: br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] // CHECK9: omp.precond.then: // CHECK9-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_6]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i64 [[TMP9]], ptr [[DOTOMP_UB]], align 8 // CHECK9-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1:[0-9]+]], i32 [[TMP11]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK9-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_6]], align 8 -// CHECK9-NEXT: [[CMP15:%.*]] = icmp sgt i64 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: br i1 [[CMP15]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[CMP13:%.*]] = icmp sgt i64 [[TMP12]], [[TMP13]] +// CHECK9-NEXT: br i1 [[CMP13]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: -// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_6]], align 8 +// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i64 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ] // CHECK9-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// 
CHECK9-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i64 [[TMP16]], ptr [[DOTOMP_IV]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[CMP16:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]] -// CHECK9-NEXT: br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK9-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[CMP14:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]] +// CHECK9-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: [[SUB17:%.*]] = sub nsw i32 [[TMP20]], 0 -// CHECK9-NEXT: [[DIV18:%.*]] = sdiv i32 [[SUB17]], 1 -// CHECK9-NEXT: [[MUL19:%.*]] = mul nsw i32 1, [[DIV18]] -// CHECK9-NEXT: [[CONV20:%.*]] = sext i32 [[MUL19]] to i64 -// CHECK9-NEXT: [[DIV21:%.*]] = sdiv i64 [[TMP19]], [[CONV20]] -// CHECK9-NEXT: [[MUL22:%.*]] = mul nsw i64 [[DIV21]], 1 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL22]] -// CHECK9-NEXT: [[CONV23:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK9-NEXT: store i32 [[CONV23]], ptr [[I13]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP23]], 0 -// CHECK9-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 -// CHECK9-NEXT: [[MUL26:%.*]] = mul nsw i32 1, [[DIV25]] -// CHECK9-NEXT: [[CONV27:%.*]] = sext i32 [[MUL26]] to i64 -// CHECK9-NEXT: [[DIV28:%.*]] = sdiv i64 [[TMP22]], [[CONV27]] -// CHECK9-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP24]], 0 -// CHECK9-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK9-NEXT: [[MUL31:%.*]] = mul nsw i32 1, [[DIV30]] -// CHECK9-NEXT: [[CONV32:%.*]] = sext i32 [[MUL31]] to i64 -// CHECK9-NEXT: [[MUL33:%.*]] = mul nsw i64 [[DIV28]], [[CONV32]] -// CHECK9-NEXT: [[SUB34:%.*]] = sub nsw i64 [[TMP21]], [[MUL33]] -// CHECK9-NEXT: [[MUL35:%.*]] = mul nsw i64 [[SUB34]], 1 -// CHECK9-NEXT: [[ADD36:%.*]] = add nsw i64 0, [[MUL35]] -// CHECK9-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 -// CHECK9-NEXT: store i32 [[CONV37]], ptr [[J14]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP25:%.*]] = load i32, ptr [[I13]], align 4, !llvm.access.group !5 +// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP20]], 0 +// CHECK9-NEXT: [[DIV16:%.*]] = sdiv i32 
[[SUB15]], 1 +// CHECK9-NEXT: [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]] +// CHECK9-NEXT: [[CONV18:%.*]] = sext i32 [[MUL17]] to i64 +// CHECK9-NEXT: [[DIV19:%.*]] = sdiv i64 [[TMP19]], [[CONV18]] +// CHECK9-NEXT: [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL20]] +// CHECK9-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD]] to i32 +// CHECK9-NEXT: store i32 [[CONV21]], ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB22:%.*]] = sub nsw i32 [[TMP23]], 0 +// CHECK9-NEXT: [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1 +// CHECK9-NEXT: [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]] +// CHECK9-NEXT: [[CONV25:%.*]] = sext i32 [[MUL24]] to i64 +// CHECK9-NEXT: [[DIV26:%.*]] = sdiv i64 [[TMP22]], [[CONV25]] +// CHECK9-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP24]], 0 +// CHECK9-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK9-NEXT: [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]] +// CHECK9-NEXT: [[CONV30:%.*]] = sext i32 [[MUL29]] to i64 +// CHECK9-NEXT: [[MUL31:%.*]] = mul nsw i64 [[DIV26]], [[CONV30]] +// CHECK9-NEXT: [[SUB32:%.*]] = sub nsw i64 [[TMP21]], [[MUL31]] +// CHECK9-NEXT: [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1 +// CHECK9-NEXT: [[ADD34:%.*]] = add nsw i64 0, [[MUL33]] +// CHECK9-NEXT: [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32 +// CHECK9-NEXT: store i32 [[CONV35]], ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP25]] to i64 // CHECK9-NEXT: [[TMP26:%.*]] = mul nsw i64 [[IDXPROM]], [[TMP1]] // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[TMP26]] -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[J14]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: [[IDXPROM38:%.*]] = sext i32 [[TMP27]] to i64 -// CHECK9-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i64 [[IDXPROM38]] -// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX39]], align 4, !llvm.access.group !5 +// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[IDXPROM36:%.*]] = sext i32 [[TMP27]] to i64 +// CHECK9-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i64 [[IDXPROM36]] +// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX37]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[ADD40:%.*]] = add nsw i64 [[TMP28]], 1 -// CHECK9-NEXT: store i64 [[ADD40]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK9-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef 
[[NOUNDEF5]] +// CHECK9-NEXT: [[ADD38:%.*]] = add nsw i64 [[TMP28]], 1 +// CHECK9-NEXT: store i64 [[ADD38]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]]) -// CHECK9-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 // CHECK9-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP33]], 0 -// CHECK9-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1 -// CHECK9-NEXT: [[MUL43:%.*]] = mul nsw i32 [[DIV42]], 1 -// CHECK9-NEXT: [[ADD44:%.*]] = add nsw i32 0, [[MUL43]] -// CHECK9-NEXT: store i32 [[ADD44]], ptr [[I13]], align 4 -// CHECK9-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_5]], align 4 -// CHECK9-NEXT: [[SUB45:%.*]] = sub nsw i32 [[TMP34]], 0 -// CHECK9-NEXT: [[DIV46:%.*]] = sdiv i32 [[SUB45]], 1 -// CHECK9-NEXT: [[MUL47:%.*]] = mul nsw i32 [[DIV46]], 1 -// CHECK9-NEXT: [[ADD48:%.*]] = add nsw i32 0, [[MUL47]] -// CHECK9-NEXT: store i32 [[ADD48]], ptr [[J14]], align 4 +// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP33]], 0 +// CHECK9-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 +// CHECK9-NEXT: [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1 +// CHECK9-NEXT: [[ADD42:%.*]] = add nsw i32 0, [[MUL41]] +// CHECK9-NEXT: store i32 [[ADD42]], ptr [[I11]], align 4 +// CHECK9-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP34]], 0 +// CHECK9-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1 +// CHECK9-NEXT: [[MUL45:%.*]] = mul nsw i32 [[DIV44]], 1 +// CHECK9-NEXT: [[ADD46:%.*]] = add nsw i32 0, [[MUL45]] +// CHECK9-NEXT: store i32 [[ADD46]], ptr [[J12]], align 4 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK9: .omp.final.done: // CHECK9-NEXT: br label [[OMP_PRECOND_END]] @@ -906,34 +906,34 @@ // CHECK9-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: store ptr [[A]], ptr [[TMP0]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[A]], ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: 
store ptr [[A]], ptr [[TMP1]], align 8 +// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr @.offload_sizes.2, ptr [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP12]], align 8 -// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP14]], align 8 -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 20, ptr [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK9-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8 +// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr @.offload_sizes.2, ptr [[TMP9]], align 8 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 
5 +// CHECK9-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP11]], align 8 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 20, ptr [[TMP13]], align 8 +// CHECK9-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK9-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67(ptr [[A]]) #[[ATTR4]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -975,61 +975,61 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 19 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 19, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !11 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK9-NEXT: 
[[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP8]], 2 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !11 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP10]], 2 // CHECK9-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 2 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL4]] // CHECK9-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK9-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !11 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 +// CHECK9-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[TMP0]], i64 0, i64 [[IDXPROM]] -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !11 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP12]] to i64 // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM7]] -// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group !11 +// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK9-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // 
CHECK9: .omp.final.then: @@ -1074,109 +1074,109 @@ // CHECK11-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6:![0-9]+]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 // CHECK11-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR1]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[M]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[M_CASTED]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[M_CASTED]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[M_CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[TMP9:%.*]] = mul nuw i32 [[TMP8]], 4 // CHECK11-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64 // CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes, i32 40, i1 false) -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 [[TMP5]], ptr [[TMP11]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: store i32 [[TMP5]], ptr [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 [[TMP5]], ptr [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store ptr null, ptr [[TMP13]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 // CHECK11-NEXT: store ptr null, ptr [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = 
getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store ptr null, ptr [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP24]], align 4 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP18]], align 4 +// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP21]], align 4 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP23]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP24]], align 4 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 +// CHECK11-NEXT: store i64 [[TMP10]], ptr [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 // CHECK11-NEXT: store ptr null, ptr [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP29]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr null, ptr [[TMP31]], align 4 -// CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP32]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP34]], align 4 -// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK11-NEXT: store i64 [[TMP10]], ptr [[TMP36]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// 
CHECK11-NEXT: store ptr null, ptr [[TMP37]], align 4 -// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP41:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP41]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP42:%.*]] = load i32, ptr [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP42]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP43]], 0 +// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: store i32 [[TMP30]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP32]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP44]], 0 +// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP33]], 0 // CHECK11-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK11-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK11-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[TMP45:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP45]], 1 +// CHECK11-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP34]], 1 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 1, ptr [[TMP46]], align 4 -// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 5, ptr [[TMP47]], align 4 -// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK11-NEXT: store ptr [[TMP38]], ptr [[TMP48]], align 4 -// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr [[TMP39]], ptr [[TMP49]], align 4 -// CHECK11-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// 
CHECK11-NEXT: store ptr [[TMP40]], ptr [[TMP50]], align 4 -// CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP51]], align 4 -// CHECK11-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP52]], align 4 -// CHECK11-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP53]], align 4 -// CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 [[ADD]], ptr [[TMP54]], align 8 -// CHECK11-NEXT: [[TMP55:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP56:%.*]] = icmp ne i32 [[TMP55]], 0 -// CHECK11-NEXT: br i1 [[TMP56]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 1, ptr [[TMP35]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 5, ptr [[TMP36]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr [[TMP27]], ptr [[TMP37]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr [[TMP28]], ptr [[TMP38]], align 4 +// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr [[TMP29]], ptr [[TMP39]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP40]], align 4 +// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP41]], align 4 +// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP42]], align 4 +// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 [[ADD]], ptr [[TMP43]], align 8 +// CHECK11-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK11-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], ptr [[VLA]]) #[[ATTR4:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP57:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 -// CHECK11-NEXT: 
[[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP57]]) +// CHECK11-NEXT: [[TMP46:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP46]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP58:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP58]]) -// CHECK11-NEXT: [[TMP59:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP59]] +// CHECK11-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK11-NEXT: [[TMP48:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP48]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80 @@ -1194,15 +1194,15 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[M_ADDR]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[M_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP5]], ptr [[M_CASTED]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[M_CASTED]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[M_CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined., i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]) // CHECK11-NEXT: ret void // @@ -1238,18 +1238,18 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP6]], 0 // CHECK11-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK11-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 @@ -1258,46 +1258,46 @@ // CHECK11-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_5]], align 8 // CHECK11-NEXT: store i32 0, ptr [[I]], align 4 // CHECK11-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP7]] // CHECK11-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK11: land.lhs.true: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP10:%.*]] = icmp slt i32 0, [[TMP8]] // CHECK11-NEXT: br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] // CHECK11: omp.precond.then: // CHECK11-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK11-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i64 [[TMP9]], ptr [[DOTOMP_UB]], align 8 // CHECK11-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1:[0-9]+]], i32 
[[TMP11]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK11-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP13:%.*]] = icmp sgt i64 [[TMP12]], [[TMP13]] // CHECK11-NEXT: br i1 [[CMP13]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: -// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK11-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i64 [ [[TMP14]], [[COND_TRUE]] ], [ [[TMP15]], [[COND_FALSE]] ] // CHECK11-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK11-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i64 [[TMP16]], ptr [[DOTOMP_IV]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP14:%.*]] = icmp sle i64 [[TMP17]], [[TMP18]] // CHECK11-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP20]], 0 // CHECK11-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1 // CHECK11-NEXT: [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]] @@ -1306,16 +1306,16 @@ // CHECK11-NEXT: [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL20]] // CHECK11-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK11-NEXT: store i32 [[CONV21]], ptr [[I11]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !6 +// CHECK11-NEXT: store i32 [[CONV21]], ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: 
[[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB22:%.*]] = sub nsw i32 [[TMP23]], 0 // CHECK11-NEXT: [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1 // CHECK11-NEXT: [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]] // CHECK11-NEXT: [[CONV25:%.*]] = sext i32 [[MUL24]] to i64 // CHECK11-NEXT: [[DIV26:%.*]] = sdiv i64 [[TMP22]], [[CONV25]] -// CHECK11-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK11-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 // CHECK11-NEXT: [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]] @@ -1325,38 +1325,38 @@ // CHECK11-NEXT: [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1 // CHECK11-NEXT: [[ADD34:%.*]] = add nsw i64 0, [[MUL33]] // CHECK11-NEXT: [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32 -// CHECK11-NEXT: store i32 [[CONV35]], ptr [[J12]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP25:%.*]] = load i32, ptr [[I11]], align 4, !llvm.access.group !6 +// CHECK11-NEXT: store i32 [[CONV35]], ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP26:%.*]] = mul nsw i32 [[TMP25]], [[TMP1]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP26]] -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[J12]], align 4, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i32 [[TMP27]] -// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX36]], align 4, !llvm.access.group !6 +// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX36]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP28:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ADD37:%.*]] = add nsw i64 [[TMP28]], 1 -// CHECK11-NEXT: store i64 [[ADD37]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK11-NEXT: store i64 [[ADD37]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP30]]) -// CHECK11-NEXT: [[TMP31:%.*]] = 
load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 // CHECK11-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB38:%.*]] = sub nsw i32 [[TMP33]], 0 // CHECK11-NEXT: [[DIV39:%.*]] = sdiv i32 [[SUB38]], 1 // CHECK11-NEXT: [[MUL40:%.*]] = mul nsw i32 [[DIV39]], 1 // CHECK11-NEXT: [[ADD41:%.*]] = add nsw i32 0, [[MUL40]] // CHECK11-NEXT: store i32 [[ADD41]], ptr [[I11]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB42:%.*]] = sub nsw i32 [[TMP34]], 0 // CHECK11-NEXT: [[DIV43:%.*]] = sdiv i32 [[SUB42]], 1 // CHECK11-NEXT: [[MUL44:%.*]] = mul nsw i32 [[DIV43]], 1 @@ -1382,34 +1382,34 @@ // CHECK11-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: store ptr [[A]], ptr [[TMP0]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: store ptr [[A]], ptr [[TMP2]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: store ptr [[A]], ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK11-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr @.offload_sizes.2, ptr [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK11-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 20, ptr [[TMP15]], align 8 -// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK11-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr @.offload_sizes.2, ptr [[TMP9]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP11]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 20, ptr [[TMP13]], align 8 +// CHECK11-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK11-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67(ptr [[A]]) #[[ATTR4]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1451,59 +1451,59 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load 
i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 19 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 19, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !12 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP8]], 2 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !12 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP10]], 2 // CHECK11-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 2 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL4]] // CHECK11-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK11-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !12 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !12 +// CHECK11-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, 
!llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[TMP0]], i32 0, i32 [[TMP11]] -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !12 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP12]] -// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !12 +// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK11-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK11-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -1549,9 +1549,9 @@ // CHECK13-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 // CHECK13-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 // CHECK13-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() // CHECK13-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 @@ -1559,15 +1559,15 @@ // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 // CHECK13-NEXT: store i64 [[TMP3]], ptr [[__VLA_EXPR1]], align 8 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4 +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[M]], align 4 +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], 
align 4 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP8]], 0 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK13-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP9]], 0 // CHECK13-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK13-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 @@ -1575,29 +1575,29 @@ // CHECK13-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK13-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 // CHECK13-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK13-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK13-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i64 [[TMP10]], ptr [[DOTOMP_UB]], align 8 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4 // CHECK13-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP11]] // CHECK13-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[SIMD_IF_END:%.*]] // CHECK13: land.lhs.true: -// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP8:%.*]] = icmp slt i32 0, [[TMP12]] // CHECK13-NEXT: br i1 [[CMP8]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END]] // CHECK13: simd.if.then: -// CHECK13-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK13-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i64 [[TMP13]], ptr [[DOTOMP_IV]], align 8 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP11:%.*]] = icmp sle i64 [[TMP14]], [[TMP15]] // CHECK13-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP17]], 0 // CHECK13-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 // CHECK13-NEXT: [[MUL14:%.*]] = mul nsw i32 1, [[DIV13]] @@ -1606,16 +1606,16 @@ // CHECK13-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 1 // 
CHECK13-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL17]] // CHECK13-NEXT: [[CONV18:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK13-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP20]], 0 // CHECK13-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1 // CHECK13-NEXT: [[MUL21:%.*]] = mul nsw i32 1, [[DIV20]] // CHECK13-NEXT: [[CONV22:%.*]] = sext i32 [[MUL21]] to i64 // CHECK13-NEXT: [[DIV23:%.*]] = sdiv i64 [[TMP19]], [[CONV22]] -// CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP21]], 0 // CHECK13-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 // CHECK13-NEXT: [[MUL26:%.*]] = mul nsw i32 1, [[DIV25]] @@ -1625,31 +1625,31 @@ // CHECK13-NEXT: [[MUL30:%.*]] = mul nsw i64 [[SUB29]], 1 // CHECK13-NEXT: [[ADD31:%.*]] = add nsw i64 0, [[MUL30]] // CHECK13-NEXT: [[CONV32:%.*]] = trunc i64 [[ADD31]] to i32 -// CHECK13-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64 // CHECK13-NEXT: [[TMP23:%.*]] = mul nsw i64 [[IDXPROM]], [[TMP3]] // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP23]] -// CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM33:%.*]] = sext i32 [[TMP24]] to i64 // CHECK13-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i64 [[IDXPROM33]] -// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX34]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX34]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD35:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK13-NEXT: store i64 [[ADD35]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group 
!2 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK13-NEXT: store i64 [[ADD35]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK13: omp.inner.for.end: -// CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB36:%.*]] = sub nsw i32 [[TMP26]], 0 // CHECK13-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1 // CHECK13-NEXT: [[MUL38:%.*]] = mul nsw i32 [[DIV37]], 1 // CHECK13-NEXT: [[ADD39:%.*]] = add nsw i32 0, [[MUL38]] // CHECK13-NEXT: store i32 [[ADD39]], ptr [[I9]], align 4 -// CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP27]], 0 // CHECK13-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 // CHECK13-NEXT: [[MUL42:%.*]] = mul nsw i32 [[DIV41]], 1 @@ -1657,7 +1657,7 @@ // CHECK13-NEXT: store i32 [[ADD43]], ptr [[J10]], align 4 // CHECK13-NEXT: br label [[SIMD_IF_END]] // CHECK13: simd.if.end: -// CHECK13-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK13-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP28]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP29:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 @@ -1681,43 +1681,43 @@ // CHECK13-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 19, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 2 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group 
[[ACC_GRP7]] +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP5]], 2 // CHECK13-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 2 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL3]] // CHECK13-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK13-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL4]] -// CHECK13-NEXT: store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM6]] -// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 10, ptr [[I]], align 4 // CHECK13-NEXT: store i32 2, ptr [[J]], align 4 @@ -1752,23 +1752,23 @@ // CHECK15-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK15-NEXT: store i32 100, ptr [[N]], align 4 // CHECK15-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF3:![0-9]+]] +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK15-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 // CHECK15-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR1]], align 4 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4 +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 
4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[M]], align 4 +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0 // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK15-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP7]], 0 // CHECK15-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK15-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 @@ -1776,29 +1776,29 @@ // CHECK15-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK15-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 // CHECK15-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK15-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK15-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_UB]], align 8 // CHECK15-NEXT: store i32 0, ptr [[I]], align 4 // CHECK15-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]] // CHECK15-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[SIMD_IF_END:%.*]] // CHECK15: land.lhs.true: -// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP8:%.*]] = icmp slt i32 0, [[TMP10]] // CHECK15-NEXT: br i1 [[CMP8]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END]] // CHECK15: simd.if.then: -// CHECK15-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK15-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i64 [[TMP11]], ptr [[DOTOMP_IV]], align 8 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP11:%.*]] = icmp sle i64 [[TMP12]], [[TMP13]] // CHECK15-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: 
[[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP15]], 0 // CHECK15-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 // CHECK15-NEXT: [[MUL14:%.*]] = mul nsw i32 1, [[DIV13]] @@ -1807,16 +1807,16 @@ // CHECK15-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL17]] // CHECK15-NEXT: [[CONV18:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK15-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP18]], 0 // CHECK15-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1 // CHECK15-NEXT: [[MUL21:%.*]] = mul nsw i32 1, [[DIV20]] // CHECK15-NEXT: [[CONV22:%.*]] = sext i32 [[MUL21]] to i64 // CHECK15-NEXT: [[DIV23:%.*]] = sdiv i64 [[TMP17]], [[CONV22]] -// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP19]], 0 // CHECK15-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 // CHECK15-NEXT: [[MUL26:%.*]] = mul nsw i32 1, [[DIV25]] @@ -1826,29 +1826,29 @@ // CHECK15-NEXT: [[MUL30:%.*]] = mul nsw i64 [[SUB29]], 1 // CHECK15-NEXT: [[ADD31:%.*]] = add nsw i64 0, [[MUL30]] // CHECK15-NEXT: [[CONV32:%.*]] = trunc i64 [[ADD31]] to i32 -// CHECK15-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP21:%.*]] = mul nsw i32 [[TMP20]], [[TMP1]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP21]] -// CHECK15-NEXT: [[TMP22:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP22:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i32 [[TMP22]] -// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX33]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX33]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 +// CHECK15-NEXT: 
[[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD34:%.*]] = add nsw i64 [[TMP23]], 1 -// CHECK15-NEXT: store i64 [[ADD34]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK15-NEXT: store i64 [[ADD34]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK15: omp.inner.for.end: -// CHECK15-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB35:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK15-NEXT: [[DIV36:%.*]] = sdiv i32 [[SUB35]], 1 // CHECK15-NEXT: [[MUL37:%.*]] = mul nsw i32 [[DIV36]], 1 // CHECK15-NEXT: [[ADD38:%.*]] = add nsw i32 0, [[MUL37]] // CHECK15-NEXT: store i32 [[ADD38]], ptr [[I9]], align 4 -// CHECK15-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK15-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP25]], 0 // CHECK15-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 // CHECK15-NEXT: [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1 @@ -1856,7 +1856,7 @@ // CHECK15-NEXT: store i32 [[ADD42]], ptr [[J10]], align 4 // CHECK15-NEXT: br label [[SIMD_IF_END]] // CHECK15: simd.if.end: -// CHECK15-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK15-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP26]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP27:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 @@ -1880,41 +1880,41 @@ // CHECK15-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 19, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 2 // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP4:%.*]] = 
load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP5]], 2 // CHECK15-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 2 // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL3]] // CHECK15-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK15-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL4]] -// CHECK15-NEXT: store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[A]], i32 0, i32 [[TMP6]] -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP7]] -// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX6]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK15-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 10, ptr [[I]], align 4 // CHECK15-NEXT: store i32 2, ptr [[J]], align 4 diff --git a/clang/test/OpenMP/target_teams_distribute_simd_reduction_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_simd_reduction_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_simd_reduction_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_simd_reduction_codegen.cpp @@ -89,34 +89,34 @@ // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP0]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// 
CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP1]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 2, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], 
align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 2, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK1-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60(ptr @_ZZ4mainE5sivar) #[[ATTR3:[0-9]+]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -160,49 +160,49 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef 
[[NOUNDEF5]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -211,21 +211,21 @@ // CHECK1: .omp.final.done: // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK1-NEXT: store ptr [[SIVAR1]], ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] // CHECK1-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP19:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: @@ -240,15 +240,15 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK1-NEXT: ret void // // @@ -263,36 +263,36 @@ // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store i32 0, ptr [[T_VAR]], align 4 // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i64 8, i1 false) -// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP0]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP1]], align 8 -// 
CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.3, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 2, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 -// CHECK1-NEXT: br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8 +// 
CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.3, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 2, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK1-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(ptr [[T_VAR]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -335,49 +335,49 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group 
[[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !11 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -386,21 +386,21 @@ // CHECK1: .omp.final.done: // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK1-NEXT: store ptr [[T_VAR1]], ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr 
@.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] // CHECK1-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP19:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: @@ -415,15 +415,15 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK1-NEXT: ret void // // @@ -445,34 +445,34 @@ // CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP2]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 
-// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 2, ptr [[TMP15]], align 8 -// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 
0, i32 3 +// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 2, ptr [[TMP13]], align 8 +// CHECK3-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK3-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l60(ptr @_ZZ4mainE5sivar) #[[ATTR3:[0-9]+]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -516,49 +516,49 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6:![0-9]+]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -567,21 +567,21 @@ // CHECK3: .omp.final.done: // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[SIVAR1]], ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK3-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK3-NEXT: switch i32 [[TMP15]], label 
[[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK3-NEXT: ] // CHECK3: .omp.reduction.case1: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] // CHECK3-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.case2: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP19:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.default: @@ -596,15 +596,15 @@ // CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK3-NEXT: ret void // // @@ -619,36 +619,36 @@ // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 0, ptr [[T_VAR]], align 4 // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i32 8, i1 false) -// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr 
[[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP1]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP3]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP5]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes.3, ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 2, ptr [[TMP16]], align 8 -// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 -// CHECK3-NEXT: br i1 [[TMP18]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr 
[[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.3, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 2, ptr [[TMP13]], align 8 +// CHECK3-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK3-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(ptr [[T_VAR]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -691,49 +691,49 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group 
!12 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !12 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !12 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !12 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group !12 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -742,21 +742,21 @@ // CHECK3: .omp.final.done: // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[T_VAR1]], ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) -// CHECK3-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i32 
4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) +// CHECK3-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK3-NEXT: ] // CHECK3: .omp.reduction.case1: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] // CHECK3-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.case2: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP19:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.default: @@ -771,15 +771,15 @@ // CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK3-NEXT: ret void // // @@ -803,36 +803,36 @@ // CHECK5-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: store i32 
0, ptr [[SIVAR]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] -// CHECK5-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK5-NEXT: store i32 [[ADD3]], ptr @_ZZ4mainE5sivar, align 4 // CHECK5-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v() @@ -854,37 +854,37 @@ // CHECK5-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i64 8, i1 false) // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load 
i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: store i32 0, ptr [[T_VAR1]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]] -// CHECK5-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group !6 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] +// CHECK5-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP6]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF2]] +// 
CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK5-NEXT: store i32 [[ADD4]], ptr [[T_VAR]], align 4 // CHECK5-NEXT: ret i32 0 // @@ -902,36 +902,36 @@ // CHECK7-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: store i32 0, ptr [[SIVAR]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] -// CHECK7-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4 +// 
CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK7-NEXT: store i32 [[ADD3]], ptr @_ZZ4mainE5sivar, align 4 // CHECK7-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v() @@ -953,37 +953,37 @@ // CHECK7-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i32 8, i1 false) // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK7-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: store i32 0, ptr [[T_VAR1]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]] -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group !7 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1 -// CHECK7-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], 
align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP6]], 1 +// CHECK7-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK7-NEXT: store i32 [[ADD4]], ptr [[T_VAR]], align 4 // CHECK7-NEXT: ret i32 0 // @@ -1034,52 +1034,52 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF4:![0-9]+]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, 
!llvm.access.group !4
-// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !4
-// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group !4
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]]
+// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]]
+// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]]
// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
-// CHECK9-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group !4
+// CHECK9-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP5]]
// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 0
-// CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP11]], align 8, !llvm.access.group !4
-// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]]), !llvm.access.group !4
+// CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP11]], align 8, !llvm.access.group [[ACC_GRP5]]
+// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]]), !llvm.access.group [[ACC_GRP5]]
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
-// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4
+// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]]
// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
-// CHECK9-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4
-// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
+// CHECK9-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]]
+// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
-// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
+// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]]
// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK9: .omp.final.then:
@@ -1088,21 +1088,21 @@
// CHECK9: .omp.final.done:
// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP15]], align 8
-// CHECK9-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
-// CHECK9-NEXT: switch i32 [[TMP18]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
+// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
+// CHECK9-NEXT: switch i32 [[TMP16]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK9-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK9-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK9-NEXT: ]
// CHECK9: .omp.reduction.case1:
-// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP0]], align 4
-// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4
-// CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
+// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF4]]
+// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF4]]
+// CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
// CHECK9-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.case2:
-// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[SIVAR1]], align 4
-// CHECK9-NEXT: [[TMP22:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP21]] monotonic, align 4
+// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF4]]
+// CHECK9-NEXT: [[TMP20:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP19]] monotonic, align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.default:
@@ -1117,15 +1117,15 @@
// CHECK9-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK9-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
-// CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
-// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0
+// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
+// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
+// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
+// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
-// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
-// CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8
-// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4
-// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4
-// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
-// CHECK9-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4
+// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF4]]
+// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF4]]
+// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
+// CHECK9-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4
// CHECK9-NEXT: ret void
//
//
diff --git a/clang/test/OpenMP/target_update_codegen.cpp b/clang/test/OpenMP/target_update_codegen.cpp
--- a/clang/test/OpenMP/target_update_codegen.cpp
+++ b/clang/test/OpenMP/target_update_codegen.cpp
@@ -408,10 +408,10 @@
// CK6-64-DAG: [[ADD_PTR]] = getelementptr inbounds i32, i32* [[ONE:%.+]], i{{32|64}} [[IDX_EXT:%.+]]
// CK6-32-DAG: [[ADD_PTR]] = getelementptr inbounds i32, i32* [[ONE:%.+]], i{{32|64}} [[L_VAL:%.+]]
// CK6-64-DAG: [[IDX_EXT]] = sext i32 [[L_VAL:%.+]] to i64
- // CK6-DAG: [[L_VAL]] = load i32, i32* [[L_ADDR:%.+]]
+ // CK6-DAG: [[L_VAL]] = load i32, i32* [[L_ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]]
// CK6-DAG: store i32 {{.+}}, i32* [[L_ADDR]]
-
// CK6-DAG: [[ONE]] = load i32*, i32** [[B_ADDR:%.+]] - // CK6-DAG: [[TWO]] = load i32*, i32** [[B_ADDR]] + // CK6-DAG: [[ONE]] = load i32*, i32** [[B_ADDR:%.+]], align [[ALIGN2:[0-9]]], !noundef [[NOUNDEF]] + // CK6-DAG: [[TWO]] = load i32*, i32** [[B_ADDR]], #pragma omp target update to(*(B+l)) *(B+l) += e; #pragma omp target update from(*(B+l)) @@ -560,7 +560,7 @@ // CK9-DAG: store double* [[P_VAL:%.+]], double** [[PC0]] // CK9-DAG: [[P]] = getelementptr inbounds [[STRUCT_S:%.+]], [[STRUCT_S]]* [[S_VAL:%.+]], i32 0, i32 0 // CK9-DAG: [[S_VAL]] = load [[STRUCT_S]]*, [[STRUCT_S]]** [[S_ADDR:%.+]] - // CK9-DAG: [[P_VAL]] = load double*, double** [[P_1:%.+]], + // CK9-DAG: [[P_VAL]] = load double*, double** [[P_1:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK9-DAG: [[P_1]] = getelementptr inbounds [[STRUCT_S]], [[STRUCT_S]]* [[S_VAL_2:%.+]], i32 0, i32 0 // CK9-DAG: [[S_VAL_2]] = load [[STRUCT_S]]*, [[STRUCT_S]]** [[S_ADDR:%.+]] #pragma omp target update to(*(s->p)) @@ -661,7 +661,7 @@ // CK11-64-DAG: [[ADD_PTR]] = getelementptr inbounds double, double* [[S_P:%.+]], i{{.+}} [[IDX_EXT:%.+]] // CK11-32-DAG: [[ADD_PTR]] = getelementptr inbounds double, double* [[S_P:%.+]], i{{.+}} [[L_VAL:%.+]] // CK11-64-DAG: [[IDX_EXT]] = sext i32 [[L_VAL:%.+]] to i64 - // CK11-DAG: [[S_P]] = load double*, double** [[P_1:%.+]], + // CK11-DAG: [[S_P]] = load double*, double** [[P_1:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK11-DAG: [[P_1]] = getelementptr inbounds [[STRUCT_S]], [[STRUCT_S]]* [[S_ADDR:%.+]], i32 0, i32 0 #pragma omp target update to((s->p+l)[3]) (s->p+l)[3] += e; @@ -723,7 +723,7 @@ // CK12-DAG: store [[STRUCT_S]]** [[S:%.+]], [[STRUCT_S]]*** [[S_VAL:%.+]] // CK12-DAG: store i{{.+}} {{.+}}, i{{.+}}* [[SIZE0]] // CK12-DAG: [[SP]] = getelementptr inbounds [[STRUCT_S]], [[STRUCT_S]]* [[ONE:%.+]], i32 0, i32 1 - // CK12-DAG: [[ONE]] = load [[STRUCT_S]]*, [[STRUCT_S]]** [[S:%.+]], + // CK12-DAG: [[ONE]] = load [[STRUCT_S]]*, [[STRUCT_S]]** [[S:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK12-DAG: [[ZERO]] = load [[STRUCT_S]]*, [[STRUCT_S]]** [[S]], #pragma omp target update to(*(s->sp->p)) *(s->sp->p) = e; @@ -767,11 +767,11 @@ // CK13-64-DAG: [[ADD_PTR_2]] = getelementptr inbounds i32, i32* [[RESULT:%.+]], i64 [[IDX_EXT_1:%.+]] // CK13-32-DAG: [[ADD_PTR_2]] = getelementptr inbounds i32, i32* [[RESULT:%.+]], i32 [[B_ADDR:%.+]] // CK13-64-DAG: [[IDX_EXT_1]] = sext i32 [[B_ADDR:%.+]] - // CK13-DAG: [[RESULT]] = load i32*, i32** [[ADD_PTR:%.+]], + // CK13-DAG: [[RESULT]] = load i32*, i32** [[ADD_PTR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK13-64-DAG: [[ADD_PTR]] = getelementptr inbounds i32*, i32** [[B_VAL:%.+]], i64 [[IDX_EXT:%.+]] // CK13-32-DAG: [[ADD_PTR]] = getelementptr inbounds i32*, i32** [[B_VAL:%.+]], i32 [[A_ADDR:%.+]] // CK13-64-DAG: [[IDX_EXT]] = sext i32 [[TWO:%.+]] to i64 - // CK13-DAG: [[B_VAL]] = load i32**, i32*** [[BB_ADDR:%.+]] + // CK13-DAG: [[B_VAL]] = load i32**, i32*** [[BB_ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK13-DAG: [[B_VAL1]] = load i32**, i32*** [[BB_ADDR]] #pragma omp target update to(*(*(BB+a)+b)) *(*(BB+a)+b) = 1; @@ -950,7 +950,7 @@ // CK16-DAG: store float* [[ADD_PTR:%.+]], float** [[PC0]] // CK16-32-DAG: [[ADD_PTR]] = getelementptr inbounds float, float* [[THREE:%.+]], i32 [[I:%.+]] // CK16-64-DAG: [[ADD_PTR]] = getelementptr inbounds float, float* [[THREE:%.+]], i64 [[IDX_EXT:%.+]] - // CK16-DAG: [[THREE]] = load float*, float** [[F_ADDR:%.+]], + // 
CK16-DAG: [[THREE]] = load float*, float** [[F_ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK16-DAG: [[F]] = load float*, float** [[F_ADDR]], // CK16-64-DAG: [[IDX_EXT]] = sext i32 [[I:%.+]] to i64 @@ -1002,15 +1002,15 @@ // CK17-64-DAG: [[IDX_EXT_3]] = sext i32 [[I_VAL:%.+]] to i64 // CK17-32-DAG: [[ADD_PTR_4]] = getelementptr inbounds float, float* [[SEVEN:%.+]], i32 [[I_VAL:%.+]] // CK17-DAG: [[I_VAL]] = load i32, i32* [[I:%.+]], - // CK17-DAG: [[SEVEN]] = load float*, float** [[ADD_PTR:%.+]], + // CK17-DAG: [[SEVEN]] = load float*, float** [[ADD_PTR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK17-64-DAG: [[ADD_PTR]] = getelementptr inbounds float*, float** [[F:%.+]], i64 [[IDX_EXT:%.+]] // CK17-32-DAG: [[ADD_PTR]] = getelementptr inbounds float*, float** [[F:%.+]], i32 [[ADD:%.+]] // CK17-64-DAG: [[IDX_EXT]] = sext i32 [[ADD:%.+]] to i64 // CK17-DAG: [[ADD]] = add nsw i32 1, [[FIVE:%.+]] - // CK17-DAG: [[FIVE]] = load i32, i32* [[I_2:%.+]], + // CK17-DAG: [[FIVE]] = load i32, i32* [[I_2:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK17-DAG: [[I_2]] = getelementptr inbounds [[SSA:%.+]], [[SSA]]* [[FOUR:%.+]], i32 0, i32 0 // CK17-DAG: [[FOUR]] = load [[SSA]]*, [[SSA]]** [[SSA_ADDR:%.+]], - // CK17-DAG: [[F]] = load float**, float*** [[F_ADDR:%.+]], + // CK17-DAG: [[F]] = load float**, float*** [[F_ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CK17-DAG: [[F_VAL]] = load float**, float*** [[F_ADDR]], #pragma omp target update to(*(sa->sa->i+*(1+sa->i+f))) diff --git a/clang/test/OpenMP/task_affinity_codegen.cpp b/clang/test/OpenMP/task_affinity_codegen.cpp --- a/clang/test/OpenMP/task_affinity_codegen.cpp +++ b/clang/test/OpenMP/task_affinity_codegen.cpp @@ -19,7 +19,7 @@ // CHECK: [[TD:%.+]] = call ptr @__kmpc_omp_task_alloc(ptr @{{.+}}, i32 [[GTID:%.+]], i32 1, i64 40, i64 1, ptr @{{.+}}) // CHECK: [[AFFINE_LST_ADDR:%.+]] = getelementptr inbounds [1 x %struct.kmp_task_affinity_info_t], ptr [[AFFS_ADDR]], i64 0, i64 0 // CHECK: [[P:%.+]] = load ptr, ptr [[P_ADDR:%.+]], - // CHECK: [[A_VAL:%.+]] = load i32, ptr [[A_ADDR:%.+]], + // CHECK: [[A_VAL:%.+]] = load i32, ptr [[A_ADDR:%.+]], align [[ALIGN:[0-9]]], !noundef [[NOUNDEF:![0-9]+]] // CHECK: [[A_SZ:%.+]] = sext i32 [[A_VAL]] to i64 // CHECK: [[BYTES:%.+]] = mul nuw i64 4, [[A_SZ]] // CHECK: [[SZ:%.+]] = mul nuw i64 [[BYTES]], 10 diff --git a/clang/test/OpenMP/task_codegen.cpp b/clang/test/OpenMP/task_codegen.cpp --- a/clang/test/OpenMP/task_codegen.cpp +++ b/clang/test/OpenMP/task_codegen.cpp @@ -254,7 +254,7 @@ // CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq ptr [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] // CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] // CHECK1: arrayctor.cont: -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 @@ -265,12 +265,12 @@ // CHECK1-NEXT: store ptr [[B]], ptr [[TMP5]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 // CHECK1-NEXT: store ptr [[S]], ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[B]], align 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 
[[TMP7]] to i32 // CHECK1-NEXT: [[TMP8:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 33, i64 40, i64 16, ptr @.omp_task_entry.) // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP8]], i32 0, i32 0 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP9]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP11]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP9]], i32 0, i32 4 // CHECK1-NEXT: store i32 [[CONV]], ptr [[TMP12]], align 8 @@ -280,7 +280,7 @@ // CHECK1-NEXT: [[TMP15:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..2) // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP15]], i32 0, i32 0 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP16]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP18]], ptr align 8 [[AGG_CAPTURED1]], i64 8, i1 false) // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR]], i64 0, i64 0 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], ptr [[TMP19]], i64 0 @@ -341,12 +341,12 @@ // CHECK1-NEXT: store i64 4, ptr [[TMP56]], align 8 // CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP54]], i32 0, i32 2 // CHECK1-NEXT: store i8 3, ptr [[TMP57]], align 8 -// CHECK1-NEXT: [[TMP58:%.*]] = load i8, ptr [[B]], align 1 +// CHECK1-NEXT: [[TMP58:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP59:%.*]] = sext i8 [[TMP58]] to i64 // CHECK1-NEXT: [[TMP60:%.*]] = mul nsw i64 4, [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP60]] // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX7]], i64 [[TMP59]] -// CHECK1-NEXT: [[TMP61:%.*]] = load i8, ptr [[B]], align 1 +// CHECK1-NEXT: [[TMP61:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP62:%.*]] = sext i8 [[TMP61]] to i64 // CHECK1-NEXT: [[TMP63:%.*]] = mul nsw i64 9, [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP63]] @@ -379,12 +379,12 @@ // CHECK1-NEXT: store i64 4, ptr [[TMP81]], align 8 // CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP79]], i32 0, i32 2 // CHECK1-NEXT: store i8 4, ptr [[TMP82]], align 8 -// CHECK1-NEXT: [[TMP83:%.*]] = load i8, ptr [[B]], align 1 +// CHECK1-NEXT: [[TMP83:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP84:%.*]] = sext i8 [[TMP83]] to i64 // CHECK1-NEXT: [[TMP85:%.*]] = mul nsw i64 4, [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP85]] // CHECK1-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX15]], i64 [[TMP84]] -// CHECK1-NEXT: [[TMP86:%.*]] = load i8, ptr [[B]], align 1 +// CHECK1-NEXT: [[TMP86:%.*]] = load i8, ptr [[B]], align 
1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP87:%.*]] = sext i8 [[TMP86]] to i64 // CHECK1-NEXT: [[TMP88:%.*]] = mul nsw i64 9, [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP88]] @@ -427,10 +427,10 @@ // CHECK1-NEXT: [[TMP112:%.*]] = mul nsw i64 0, [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP112]] // CHECK1-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX23]], i64 3 -// CHECK1-NEXT: [[TMP113:%.*]] = load i32, ptr @a, align 4 +// CHECK1-NEXT: [[TMP113:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP114:%.*]] = sext i32 [[TMP113]] to i64 // CHECK1-NEXT: [[LEN_SUB_1:%.*]] = sub nsw i64 [[TMP114]], 1 -// CHECK1-NEXT: [[TMP115:%.*]] = load i32, ptr @a, align 4 +// CHECK1-NEXT: [[TMP115:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP116:%.*]] = sext i32 [[TMP115]] to i64 // CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP116]] // CHECK1-NEXT: [[TMP117:%.*]] = mul nsw i64 [[LB_ADD_LEN]], [[TMP2]] @@ -459,26 +459,26 @@ // CHECK1-NEXT: [[TMP133:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP131]]) // CHECK1-NEXT: [[TMP134:%.*]] = getelementptr inbounds [[STRUCT_ANON_14]], ptr [[AGG_CAPTURED30]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[C]], ptr [[TMP134]], align 8 -// CHECK1-NEXT: [[TMP135:%.*]] = load i8, ptr [[B]], align 1 +// CHECK1-NEXT: [[TMP135:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP135]], 0 // CHECK1-NEXT: [[TMP136:%.*]] = select i1 [[TOBOOL]], i32 2, i32 0 // CHECK1-NEXT: [[TMP137:%.*]] = or i32 [[TMP136]], 1 // CHECK1-NEXT: [[TMP138:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP137]], i64 40, i64 8, ptr @.omp_task_entry..16) // CHECK1-NEXT: [[TMP139:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP138]], i32 0, i32 0 // CHECK1-NEXT: [[TMP140:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP139]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP141:%.*]] = load ptr, ptr [[TMP140]], align 8 +// CHECK1-NEXT: [[TMP141:%.*]] = load ptr, ptr [[TMP140]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP141]], ptr align 8 [[AGG_CAPTURED30]], i64 8, i1 false) // CHECK1-NEXT: [[TMP142:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP138]]) // CHECK1-NEXT: [[TMP143:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 0, i64 256, i64 1, ptr @.omp_task_entry..21) // CHECK1-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19:%.*]], ptr [[TMP143]], i32 0, i32 0 // CHECK1-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19]], ptr [[TMP143]], i32 0, i32 2 // CHECK1-NEXT: [[TMP146:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_20:%.*]], ptr [[TMP145]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP147:%.*]] = load i32, ptr [[C]], align 128 +// CHECK1-NEXT: [[TMP147:%.*]] = load i32, ptr [[C]], align 128, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP147]], ptr [[TMP146]], align 128 // CHECK1-NEXT: [[TMP148:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP144]], i32 0, i32 2 // CHECK1-NEXT: store i32 0, ptr [[TMP148]], align 16 // CHECK1-NEXT: [[TMP149:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP143]]) -// CHECK1-NEXT: [[TMP150:%.*]] = load i32, ptr @a, align 
4 +// CHECK1-NEXT: [[TMP150:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP150]], ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP151:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP151]]) @@ -519,25 +519,25 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store i32 15, ptr @a, align 4, !noalias !12 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr @a, align 4, !noalias !12 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store i32 15, ptr @a, align 4, !noalias !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr @a, align 4, !noalias !13, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i32 [[TMP9]] to i8 // CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP8]], align 
8 // CHECK1-NEXT: store i8 [[CONV_I]], ptr [[TMP10]], align 1 @@ -560,24 +560,24 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !22 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !22 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !22 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !22 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !22 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 -// CHECK1-NEXT: store i32 15, ptr @a, align 4, !noalias !22 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META14:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !23 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !23 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !23 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !23 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !23 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !23 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !23 +// CHECK1-NEXT: store i32 15, ptr @a, align 4, !noalias !23 // CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK1-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[TMP9]], i64 0, i64 1 // CHECK1-NEXT: store i32 10, ptr [[ARRAYIDX_I]], align 4 @@ -598,48 +598,48 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// 
CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META28:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META27:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META29:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META31:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !33 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK1-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK1-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK1-NEXT: ] // CHECK1: .untied.done..i: -// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK1-NEXT: br label [[CLEANUP_I:%.*]] // CHECK1: .untied.jmp..i: -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr 
[[DOTPART_ID__ADDR_I]], align 8, !noalias !33 // CHECK1-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !33, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 // CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP12]], ptr [[TMP13]]) // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK1: .untied.jmp.1.i: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !33, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_critical(ptr @[[GLOB1]], i32 [[TMP15]], ptr @.gomp_critical_user_.var) -// CHECK1-NEXT: store i32 1, ptr @a, align 4, !noalias !32 +// CHECK1-NEXT: store i32 1, ptr @a, align 4, !noalias !33 // CHECK1-NEXT: call void @__kmpc_end_critical(ptr @[[GLOB1]], i32 [[TMP15]], ptr @.gomp_critical_user_.var) -// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK1-NEXT: br label [[CLEANUP_I]] // CHECK1: cleanup.i: -// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]] // CHECK1: .omp_outlined..3.exit: // CHECK1-NEXT: ret i32 0 @@ -659,45 +659,45 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META33:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META36:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META38:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META40:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !42 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !42 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !42 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !42 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !42 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr 
[[__CONTEXT_ADDR_I]], align 8, !noalias !42 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META34:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META37:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META39:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META41:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !43 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !43 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !43 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !43 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !43 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !43 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK1-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK1-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK1-NEXT: ] // CHECK1: .untied.done..i: -// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK1-NEXT: br label [[CLEANUP_I:%.*]] // CHECK1: .untied.jmp..i: -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 // CHECK1-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !42 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !42 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !43, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !43 // CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP12]], ptr [[TMP13]]) // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT:%.*]] // CHECK1: .untied.jmp.1.i: -// CHECK1-NEXT: store i32 1, ptr @a, align 4, !noalias !42 -// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK1-NEXT: store i32 1, ptr @a, align 4, !noalias !43 +// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK1-NEXT: br label [[CLEANUP_I]] // CHECK1: cleanup.i: -// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT]] // CHECK1: .omp_outlined..5.exit: // CHECK1-NEXT: ret i32 0 @@ -717,45 +717,45 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: 
store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_7:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META43:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META46:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META48:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META50:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !52 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META44:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META47:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META49:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META51:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !53 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK1-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK1-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK1-NEXT: ] // CHECK1: .untied.done..i: -// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK1-NEXT: br label [[CLEANUP_I:%.*]] // CHECK1: .untied.jmp..i: -// CHECK1-NEXT: [[TMP11:%.*]] = load 
ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 // CHECK1-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !52 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !53, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 // CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP12]], ptr [[TMP13]]) // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__7_EXIT:%.*]] // CHECK1: .untied.jmp.1.i: -// CHECK1-NEXT: store i32 1, ptr @a, align 4, !noalias !52 -// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK1-NEXT: store i32 1, ptr @a, align 4, !noalias !53 +// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK1-NEXT: br label [[CLEANUP_I]] // CHECK1: cleanup.i: -// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__7_EXIT]] // CHECK1: .omp_outlined..7.exit: // CHECK1-NEXT: ret i32 0 @@ -774,24 +774,24 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_9:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META53:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META56:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META58:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META60:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !62 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK1-NEXT: store i32 2, ptr @a, align 4, !noalias !62 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META54:![0-9]+]]) +// CHECK1-NEXT: call void 
@llvm.experimental.noalias.scope.decl(metadata [[META57:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META59:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META61:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !63 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK1-NEXT: store i32 2, ptr @a, align 4, !noalias !63 // CHECK1-NEXT: ret i32 0 // // @@ -808,24 +808,24 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_11:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META63:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META66:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META68:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META70:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !72 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !72 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !72 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !72 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !72 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !72 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !72 -// CHECK1-NEXT: store i32 2, ptr @a, align 4, !noalias !72 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META64:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META67:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META69:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META71:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !73 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !73 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !73 +// 
CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !73 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !73 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !73 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !73 +// CHECK1-NEXT: store i32 2, ptr @a, align 4, !noalias !73 // CHECK1-NEXT: ret i32 0 // // @@ -842,24 +842,24 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_13:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META73:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META76:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META78:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META80:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !82 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !82 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !82 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !82 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !82 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !82 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !82 -// CHECK1-NEXT: store i32 3, ptr @a, align 4, !noalias !82 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META74:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META77:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META79:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META81:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !83 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !83 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !83 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !83 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !83 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !83 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !83 +// CHECK1-NEXT: store i32 3, ptr @a, align 4, !noalias !83 // CHECK1-NEXT: ret i32 0 // // @@ -876,24 +876,24 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: 
store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META83:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META86:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META88:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META90:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !92 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !92 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !92 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !92 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !92 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !92 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !92 -// CHECK1-NEXT: store i32 4, ptr @a, align 4, !noalias !92 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META84:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META87:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META89:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META91:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !93 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !93 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !93 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !93 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !93 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !93 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !93 +// CHECK1-NEXT: store i32 4, ptr @a, align 4, !noalias !93 // CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK1-NEXT: store i32 5, ptr [[TMP9]], align 128 // CHECK1-NEXT: ret i32 0 @@ -927,30 +927,30 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASK_T_WITH_PRIVATES_18:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_18]], ptr [[TMP3]], i32 0, i32 2 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META93:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META96:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META98:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META100:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !102 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !102 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !102 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !102 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !102 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !102 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !102 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !102 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !102 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META94:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META97:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META99:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META101:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !103 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !103 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !103 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !103 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !103 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !103 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !103 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !103 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !103 // CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !102 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !103 // CHECK1-NEXT: store i32 4, ptr [[TMP12]], align 128 -// CHECK1-NEXT: store i32 4, ptr @a, align 4, !noalias !102 +// CHECK1-NEXT: store i32 4, ptr @a, align 4, !noalias !103 // CHECK1-NEXT: ret i32 0 // // @@ -1006,33 +1006,33 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // 
CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19]], ptr [[TMP3]], i32 0, i32 2 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META103:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META106:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META108:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META110:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !112 -// CHECK1-NEXT: store ptr @.omp_task_privates_map..20, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !112 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !112 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !112 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !112 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !112 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META104:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META107:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META109:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META111:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !113 +// CHECK1-NEXT: store ptr @.omp_task_privates_map..20, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !113 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !113 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !113 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !113 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !113 // CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTLOCAL_PTR_ADDR_I]], ptr [[DOTLOCAL_PTR_ADDR1_I]]) #[[ATTR4]] -// CHECK1-NEXT: 
[[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !112 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR_I]], align 8, !noalias !112 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR1_I]], align 8, !noalias !112 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !113 +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR_I]], align 8, !noalias !113 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR1_I]], align 8, !noalias !113 // CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP16]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK1-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK1-NEXT: i32 1, label [[DOTUNTIED_JMP_2_I:%.*]] @@ -1042,49 +1042,49 @@ // CHECK1-NEXT: i32 5, label [[DOTUNTIED_JMP_10_I:%.*]] // CHECK1-NEXT: ] // CHECK1: .untied.done..i: -// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK1-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !113 // CHECK1-NEXT: br label [[CLEANUP_I:%.*]] // CHECK1: .untied.jmp..i: -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK1-NEXT: store i32 1, ptr [[TMP18]], align 4 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK1-NEXT: [[TMP21:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP19]], ptr [[TMP20]]) // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT:%.*]] // CHECK1: .untied.jmp.2.i: // CHECK1-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP13]]) -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[DOTS2__VOID_ADDR_I:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP22]], i64 4, ptr inttoptr (i64 7 to ptr)) // CHECK1-NEXT: store ptr [[DOTS2__VOID_ADDR_I]], ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK1-NEXT: store i32 2, ptr [[TMP23]], align 4 -// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK1-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_omp_task(ptr 
@[[GLOB1]], i32 [[TMP24]], ptr [[TMP25]]) // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK1: .untied.jmp.3.i: // CHECK1-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP15]]) // CHECK1-NEXT: store i32 0, ptr [[TMP15]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP28:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP27]], i32 1, i64 256, i64 1, ptr @.omp_task_entry..19) // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_18:%.*]], ptr [[TMP28]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP12]], align 128 +// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP12]], align 128, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 [[TMP30]], ptr [[TMP29]], align 128 -// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP32:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP31]], ptr [[TMP28]]) -// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK1-NEXT: store i32 3, ptr [[TMP33]], align 4 -// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK1-NEXT: [[TMP36:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP34]], ptr [[TMP35]]) // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK1: .untied.jmp.5.i: -// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 +// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP38:%.*]] = call i32 @__kmpc_omp_taskyield(ptr @[[GLOB1]], i32 [[TMP37]], i32 0) -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK1-NEXT: store i32 4, ptr [[TMP39]], align 4 -// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK1-NEXT: [[TMP41:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP41:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK1-NEXT: [[TMP42:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP40]], ptr [[TMP41]]) // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK1: .untied.jmp.7.i: @@ -1092,23 +1092,23 @@ // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP13]], ptr align 4 [[REF_TMP_I]], i64 4, i1 false) // CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[REF_TMP_I]]) #[[ATTR4]] // CHECK1-NEXT: store i32 
10, ptr [[TMP15]], align 4 -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP44:%.*]] = call i32 @__kmpc_omp_taskwait(ptr @[[GLOB1]], i32 [[TMP43]]) -// CHECK1-NEXT: [[TMP45:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK1-NEXT: [[TMP45:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK1-NEXT: store i32 5, ptr [[TMP45]], align 4 -// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK1-NEXT: [[TMP47:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK1-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP47:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK1-NEXT: [[TMP48:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP46]], ptr [[TMP47]]) // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK1: .untied.jmp.10.i: // CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP15]]) #[[ATTR4]] -// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 +// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_free(i32 [[TMP49]], ptr [[TMP15]], ptr inttoptr (i64 7 to ptr)) // CHECK1-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP13]]) #[[ATTR4]] -// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK1-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !113 // CHECK1-NEXT: br label [[CLEANUP_I]] // CHECK1: cleanup.i: -// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK1-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !113 // CHECK1-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK1: .omp_outlined..17.exit: // CHECK1-NEXT: ret i32 0 @@ -1174,7 +1174,7 @@ // CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..23) // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_22:%.*]], ptr [[TMP2]], i32 0, i32 0 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP3]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP5]], ptr align 8 [[AGG_CAPTURED]], i64 8, i1 false) // CHECK1-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP2]]) // CHECK1-NEXT: ret void @@ -1193,24 +1193,24 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASK_T_WITH_PRIVATES_22:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META113:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META116:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META118:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META120:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !122 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !122 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !122 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !122 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META114:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META117:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META119:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META121:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !123 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !123 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !123 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !123 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i32 0, ptr [[TMP9]], align 4 // CHECK1-NEXT: ret i32 0 // @@ -1243,7 +1243,7 @@ // CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB1]], i32 [[TMP2]]) // CHECK1-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0 // CHECK1-NEXT: br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -1253,11 +1253,11 @@ // CHECK1-NEXT: [[TMP6:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i64 48, i64 8, ptr @.omp_task_entry..27) // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_24:%.*]], ptr [[TMP6]], i32 0, i32 0 // CHECK1-NEXT: 
[[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP9]], ptr align 8 [[AGG_CAPTURED]], i64 8, i1 false) // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_24]], ptr [[TMP6]], i32 0, i32 1 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_25:%.*]], ptr [[TMP10]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP12:%.*]] = load double, ptr [[B]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load double, ptr [[B]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store double [[TMP12]], ptr [[TMP11]], align 8 // CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP2]], ptr [[TMP6]]) // CHECK1-NEXT: call void @__kmpc_end_single(ptr @[[GLOB1]], i32 [[TMP2]]) @@ -1295,31 +1295,31 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_24:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_24]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META123:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META126:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META128:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META130:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !132 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !132 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !132 -// CHECK1-NEXT: store ptr @.omp_task_privates_map..26, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !132 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !132 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !132 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !132 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !132 -// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !132 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META124:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META127:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META129:![0-9]+]]) +// CHECK1-NEXT: call void 
@llvm.experimental.noalias.scope.decl(metadata [[META131:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !133 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !133 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !133 +// CHECK1-NEXT: store ptr @.omp_task_privates_map..26, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !133 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !133 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !133 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !133 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !133 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !133 // CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !132 -// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !133 +// CHECK1-NEXT: [[TMP13:%.*]] = load double, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = fpext float [[TMP15]] to double // CHECK1-NEXT: [[ADD_I:%.*]] = fadd double [[CONV_I]], [[TMP13]] // CHECK1-NEXT: [[CONV1_I:%.*]] = fptrunc double [[ADD_I]] to float @@ -1374,7 +1374,7 @@ // CHECK2-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq ptr [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] // CHECK2-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] // CHECK2: arrayctor.cont: -// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK2-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK2-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK2-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 @@ -1385,12 +1385,12 @@ // CHECK2-NEXT: store ptr [[B]], ptr [[TMP5]], align 8 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 // CHECK2-NEXT: store ptr [[S]], ptr [[TMP6]], align 8 -// CHECK2-NEXT: [[TMP7:%.*]] = load i8, ptr [[B]], align 1 +// CHECK2-NEXT: [[TMP7:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 // CHECK2-NEXT: [[TMP8:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 33, i64 40, i64 16, ptr @.omp_task_entry.) 
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP8]], i32 0, i32 0 // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP9]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP11]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP9]], i32 0, i32 4 // CHECK2-NEXT: store i32 [[CONV]], ptr [[TMP12]], align 8 @@ -1400,7 +1400,7 @@ // CHECK2-NEXT: [[TMP15:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..2) // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP15]], i32 0, i32 0 // CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP16]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 +// CHECK2-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP18]], ptr align 8 [[AGG_CAPTURED1]], i64 8, i1 false) // CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR]], i64 0, i64 0 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], ptr [[TMP19]], i64 0 @@ -1461,12 +1461,12 @@ // CHECK2-NEXT: store i64 4, ptr [[TMP56]], align 8 // CHECK2-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP54]], i32 0, i32 2 // CHECK2-NEXT: store i8 3, ptr [[TMP57]], align 8 -// CHECK2-NEXT: [[TMP58:%.*]] = load i8, ptr [[B]], align 1 +// CHECK2-NEXT: [[TMP58:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP59:%.*]] = sext i8 [[TMP58]] to i64 // CHECK2-NEXT: [[TMP60:%.*]] = mul nsw i64 4, [[TMP2]] // CHECK2-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP60]] // CHECK2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX7]], i64 [[TMP59]] -// CHECK2-NEXT: [[TMP61:%.*]] = load i8, ptr [[B]], align 1 +// CHECK2-NEXT: [[TMP61:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP62:%.*]] = sext i8 [[TMP61]] to i64 // CHECK2-NEXT: [[TMP63:%.*]] = mul nsw i64 9, [[TMP2]] // CHECK2-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP63]] @@ -1499,12 +1499,12 @@ // CHECK2-NEXT: store i64 4, ptr [[TMP81]], align 8 // CHECK2-NEXT: [[TMP82:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP79]], i32 0, i32 2 // CHECK2-NEXT: store i8 4, ptr [[TMP82]], align 8 -// CHECK2-NEXT: [[TMP83:%.*]] = load i8, ptr [[B]], align 1 +// CHECK2-NEXT: [[TMP83:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP84:%.*]] = sext i8 [[TMP83]] to i64 // CHECK2-NEXT: [[TMP85:%.*]] = mul nsw i64 4, [[TMP2]] // CHECK2-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP85]] // CHECK2-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX15]], i64 [[TMP84]] -// CHECK2-NEXT: [[TMP86:%.*]] = load i8, ptr [[B]], align 1 +// CHECK2-NEXT: [[TMP86:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP87:%.*]] = sext i8 [[TMP86]] to i64 // CHECK2-NEXT: [[TMP88:%.*]] = mul nsw i64 9, [[TMP2]] // 
CHECK2-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP88]] @@ -1547,10 +1547,10 @@ // CHECK2-NEXT: [[TMP112:%.*]] = mul nsw i64 0, [[TMP2]] // CHECK2-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP112]] // CHECK2-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX23]], i64 3 -// CHECK2-NEXT: [[TMP113:%.*]] = load i32, ptr @a, align 4 +// CHECK2-NEXT: [[TMP113:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP114:%.*]] = sext i32 [[TMP113]] to i64 // CHECK2-NEXT: [[LEN_SUB_1:%.*]] = sub nsw i64 [[TMP114]], 1 -// CHECK2-NEXT: [[TMP115:%.*]] = load i32, ptr @a, align 4 +// CHECK2-NEXT: [[TMP115:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP116:%.*]] = sext i32 [[TMP115]] to i64 // CHECK2-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP116]] // CHECK2-NEXT: [[TMP117:%.*]] = mul nsw i64 [[LB_ADD_LEN]], [[TMP2]] @@ -1579,26 +1579,26 @@ // CHECK2-NEXT: [[TMP133:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP131]]) // CHECK2-NEXT: [[TMP134:%.*]] = getelementptr inbounds [[STRUCT_ANON_14]], ptr [[AGG_CAPTURED30]], i32 0, i32 0 // CHECK2-NEXT: store ptr [[C]], ptr [[TMP134]], align 8 -// CHECK2-NEXT: [[TMP135:%.*]] = load i8, ptr [[B]], align 1 +// CHECK2-NEXT: [[TMP135:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP135]], 0 // CHECK2-NEXT: [[TMP136:%.*]] = select i1 [[TOBOOL]], i32 2, i32 0 // CHECK2-NEXT: [[TMP137:%.*]] = or i32 [[TMP136]], 1 // CHECK2-NEXT: [[TMP138:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP137]], i64 40, i64 8, ptr @.omp_task_entry..16) // CHECK2-NEXT: [[TMP139:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP138]], i32 0, i32 0 // CHECK2-NEXT: [[TMP140:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP139]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP141:%.*]] = load ptr, ptr [[TMP140]], align 8 +// CHECK2-NEXT: [[TMP141:%.*]] = load ptr, ptr [[TMP140]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP141]], ptr align 8 [[AGG_CAPTURED30]], i64 8, i1 false) // CHECK2-NEXT: [[TMP142:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP138]]) // CHECK2-NEXT: [[TMP143:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 0, i64 256, i64 1, ptr @.omp_task_entry..21) // CHECK2-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19:%.*]], ptr [[TMP143]], i32 0, i32 0 // CHECK2-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19]], ptr [[TMP143]], i32 0, i32 2 // CHECK2-NEXT: [[TMP146:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_20:%.*]], ptr [[TMP145]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP147:%.*]] = load i32, ptr [[C]], align 128 +// CHECK2-NEXT: [[TMP147:%.*]] = load i32, ptr [[C]], align 128, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store i32 [[TMP147]], ptr [[TMP146]], align 128 // CHECK2-NEXT: [[TMP148:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP144]], i32 0, i32 2 // CHECK2-NEXT: store i32 0, ptr [[TMP148]], align 16 // CHECK2-NEXT: [[TMP149:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP143]]) -// CHECK2-NEXT: [[TMP150:%.*]] = load i32, ptr @a, align 4 +// CHECK2-NEXT: [[TMP150:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store i32 [[TMP150]], ptr 
[[RETVAL]], align 4 // CHECK2-NEXT: [[TMP151:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK2-NEXT: call void @llvm.stackrestore(ptr [[TMP151]]) @@ -1639,25 +1639,25 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK2-NEXT: store i32 15, ptr @a, align 4, !noalias !12 -// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr @a, align 4, !noalias !12 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK2-NEXT: store i32 15, ptr @a, align 4, !noalias !13 +// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr @a, align 4, !noalias !13, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CONV_I:%.*]] = trunc i32 [[TMP9]] to i8 // CHECK2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK2-NEXT: store i8 [[CONV_I]], ptr [[TMP10]], align 1 @@ -1680,24 +1680,24 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = 
alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !22 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !22 -// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !22 -// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !22 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !22 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 -// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 -// CHECK2-NEXT: store i32 15, ptr @a, align 4, !noalias !22 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META14:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !23 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !23 +// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !23 +// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !23 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !23 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !23 +// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !23 +// CHECK2-NEXT: store i32 15, ptr @a, align 4, !noalias !23 // CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK2-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[TMP9]], i64 0, i64 1 // CHECK2-NEXT: store i32 10, ptr [[ARRAYIDX_I]], align 4 @@ -1718,48 +1718,48 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = 
load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META28:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !32 -// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !32 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META27:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META29:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META31:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !33 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !33 +// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !33 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK2-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK2-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK2-NEXT: ] // CHECK2: .untied.done..i: -// CHECK2-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK2-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK2-NEXT: br label [[CLEANUP_I:%.*]] // CHECK2: .untied.jmp..i: -// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 +// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 // CHECK2-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK2-NEXT: [[TMP12:%.*]] = 
load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 -// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !33, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 // CHECK2-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP12]], ptr [[TMP13]]) // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK2: .untied.jmp.1.i: -// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 +// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !33, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void @__kmpc_critical(ptr @[[GLOB1]], i32 [[TMP15]], ptr @.gomp_critical_user_.var) -// CHECK2-NEXT: store i32 1, ptr @a, align 4, !noalias !32 +// CHECK2-NEXT: store i32 1, ptr @a, align 4, !noalias !33 // CHECK2-NEXT: call void @__kmpc_end_critical(ptr @[[GLOB1]], i32 [[TMP15]], ptr @.gomp_critical_user_.var) -// CHECK2-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK2-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK2-NEXT: br label [[CLEANUP_I]] // CHECK2: cleanup.i: -// CHECK2-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK2-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]] // CHECK2: .omp_outlined..3.exit: // CHECK2-NEXT: ret i32 0 @@ -1779,45 +1779,45 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META33:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META36:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META38:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META40:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !42 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 -// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !42 -// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !42 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !42 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !42 -// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !42 -// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 -// 
CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META34:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META37:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META39:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META41:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !43 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 +// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !43 +// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !43 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !43 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !43 +// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !43 +// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK2-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK2-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK2-NEXT: ] // CHECK2: .untied.done..i: -// CHECK2-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK2-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK2-NEXT: br label [[CLEANUP_I:%.*]] // CHECK2: .untied.jmp..i: -// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 +// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 // CHECK2-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !42 -// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !42 +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !43, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !43 // CHECK2-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP12]], ptr [[TMP13]]) // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT:%.*]] // CHECK2: .untied.jmp.1.i: -// CHECK2-NEXT: store i32 1, ptr @a, align 4, !noalias !42 -// CHECK2-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK2-NEXT: store i32 1, ptr @a, align 4, !noalias !43 +// CHECK2-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK2-NEXT: br label [[CLEANUP_I]] // CHECK2: cleanup.i: -// CHECK2-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK2-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT]] // CHECK2: .omp_outlined..5.exit: // CHECK2-NEXT: ret i32 0 @@ -1837,45 +1837,45 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] 
= load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_7:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META43:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META46:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META48:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META50:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !52 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !52 -// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !52 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META44:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META47:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META49:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META51:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !53 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !53 +// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !53 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK2-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK2-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK2-NEXT: ] // CHECK2: .untied.done..i: -// CHECK2-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK2-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK2-NEXT: br label [[CLEANUP_I:%.*]] // CHECK2: .untied.jmp..i: -// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 +// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias 
!53 // CHECK2-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !52 -// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !53, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 // CHECK2-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP12]], ptr [[TMP13]]) // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__7_EXIT:%.*]] // CHECK2: .untied.jmp.1.i: -// CHECK2-NEXT: store i32 1, ptr @a, align 4, !noalias !52 -// CHECK2-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK2-NEXT: store i32 1, ptr @a, align 4, !noalias !53 +// CHECK2-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK2-NEXT: br label [[CLEANUP_I]] // CHECK2: cleanup.i: -// CHECK2-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK2-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__7_EXIT]] // CHECK2: .omp_outlined..7.exit: // CHECK2-NEXT: ret i32 0 @@ -1894,24 +1894,24 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_9:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META53:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META56:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META58:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META60:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !62 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK2-NEXT: store i32 2, ptr @a, align 4, !noalias !62 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META54:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META57:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META59:![0-9]+]]) +// 
CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META61:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !63 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK2-NEXT: store i32 2, ptr @a, align 4, !noalias !63 // CHECK2-NEXT: ret i32 0 // // @@ -1928,24 +1928,24 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_11:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META63:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META66:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META68:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META70:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !72 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !72 -// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !72 -// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !72 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !72 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !72 -// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !72 -// CHECK2-NEXT: store i32 2, ptr @a, align 4, !noalias !72 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META64:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META67:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META69:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META71:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !73 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !73 +// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !73 +// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !73 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !73 +// 
CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !73 +// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !73 +// CHECK2-NEXT: store i32 2, ptr @a, align 4, !noalias !73 // CHECK2-NEXT: ret i32 0 // // @@ -1962,24 +1962,24 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_13:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META73:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META76:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META78:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META80:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !82 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !82 -// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !82 -// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !82 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !82 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !82 -// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !82 -// CHECK2-NEXT: store i32 3, ptr @a, align 4, !noalias !82 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META74:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META77:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META79:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META81:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !83 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !83 +// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !83 +// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !83 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !83 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !83 +// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !83 +// CHECK2-NEXT: store i32 3, ptr @a, align 4, !noalias !83 // CHECK2-NEXT: ret i32 0 // // @@ -1996,24 +1996,24 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], 
align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META83:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META86:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META88:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META90:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !92 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !92 -// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !92 -// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !92 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !92 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !92 -// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !92 -// CHECK2-NEXT: store i32 4, ptr @a, align 4, !noalias !92 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META84:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META87:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META89:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META91:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !93 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !93 +// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !93 +// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !93 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !93 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !93 +// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !93 +// CHECK2-NEXT: store i32 4, ptr @a, align 4, !noalias !93 // CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK2-NEXT: store i32 5, ptr [[TMP9]], align 128 // CHECK2-NEXT: ret i32 0 @@ -2047,30 +2047,30 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_18:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: 
[[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_18]], ptr [[TMP3]], i32 0, i32 2 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META93:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META96:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META98:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META100:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !102 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !102 -// CHECK2-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !102 -// CHECK2-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !102 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !102 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !102 -// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !102 -// CHECK2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !102 -// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !102 +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META94:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META97:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META99:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META101:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !103 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !103 +// CHECK2-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !103 +// CHECK2-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !103 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !103 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !103 +// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !103 +// CHECK2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !103 +// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !103 // CHECK2-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK2-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !102 +// CHECK2-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !103 // CHECK2-NEXT: store i32 4, ptr [[TMP12]], align 128 -// CHECK2-NEXT: store i32 4, ptr @a, align 4, !noalias !102 +// CHECK2-NEXT: store i32 4, ptr @a, align 4, !noalias !103 // CHECK2-NEXT: ret i32 0 // // @@ -2126,33 +2126,33 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: 
[[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19]], ptr [[TMP3]], i32 0, i32 2 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META103:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META106:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META108:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META110:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 -// CHECK2-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !112 -// CHECK2-NEXT: store ptr @.omp_task_privates_map..20, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !112 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !112 -// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !112 -// CHECK2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !112 -// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !112 +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META104:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META107:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META109:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META111:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 +// CHECK2-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !113 +// CHECK2-NEXT: store ptr @.omp_task_privates_map..20, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !113 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !113 +// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !113 +// CHECK2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !113 +// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !113 // CHECK2-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTLOCAL_PTR_ADDR_I]], ptr [[DOTLOCAL_PTR_ADDR1_I]]) #[[ATTR4]] -// CHECK2-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !112 -// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR_I]], align 8, !noalias !112 -// CHECK2-NEXT: [[TMP14:%.*]] = load 
ptr, ptr [[DOTLOCAL_PTR_ADDR1_I]], align 8, !noalias !112 +// CHECK2-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !113 +// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR_I]], align 8, !noalias !113 +// CHECK2-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR1_I]], align 8, !noalias !113 // CHECK2-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK2-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 -// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP16]], align 4 +// CHECK2-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 +// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: switch i32 [[TMP17]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK2-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK2-NEXT: i32 1, label [[DOTUNTIED_JMP_2_I:%.*]] @@ -2162,49 +2162,49 @@ // CHECK2-NEXT: i32 5, label [[DOTUNTIED_JMP_10_I:%.*]] // CHECK2-NEXT: ] // CHECK2: .untied.done..i: -// CHECK2-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK2-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !113 // CHECK2-NEXT: br label [[CLEANUP_I:%.*]] // CHECK2: .untied.jmp..i: -// CHECK2-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK2-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK2-NEXT: store i32 1, ptr [[TMP18]], align 4 -// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK2-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK2-NEXT: [[TMP21:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP19]], ptr [[TMP20]]) // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT:%.*]] // CHECK2: .untied.jmp.2.i: // CHECK2-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP13]]) -// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[DOTS2__VOID_ADDR_I:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP22]], i64 4, ptr inttoptr (i64 7 to ptr)) // CHECK2-NEXT: store ptr [[DOTS2__VOID_ADDR_I]], ptr [[TMP14]], align 8 -// CHECK2-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK2-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK2-NEXT: store i32 2, ptr [[TMP23]], align 4 -// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK2-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK2-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP24]], ptr [[TMP25]]) // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK2: .untied.jmp.3.i: // CHECK2-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) 
[[TMP15]]) // CHECK2-NEXT: store i32 0, ptr [[TMP15]], align 4 -// CHECK2-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 +// CHECK2-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP28:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP27]], i32 1, i64 256, i64 1, ptr @.omp_task_entry..19) // CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_18:%.*]], ptr [[TMP28]], i32 0, i32 2 -// CHECK2-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP12]], align 128 +// CHECK2-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP12]], align 128, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store i32 [[TMP30]], ptr [[TMP29]], align 128 -// CHECK2-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 +// CHECK2-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP32:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP31]], ptr [[TMP28]]) -// CHECK2-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK2-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK2-NEXT: store i32 3, ptr [[TMP33]], align 4 -// CHECK2-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK2-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK2-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK2-NEXT: [[TMP36:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP34]], ptr [[TMP35]]) // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK2: .untied.jmp.5.i: -// CHECK2-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 +// CHECK2-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP38:%.*]] = call i32 @__kmpc_omp_taskyield(ptr @[[GLOB1]], i32 [[TMP37]], i32 0) -// CHECK2-NEXT: [[TMP39:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK2-NEXT: [[TMP39:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK2-NEXT: store i32 4, ptr [[TMP39]], align 4 -// CHECK2-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK2-NEXT: [[TMP41:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK2-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP41:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK2-NEXT: [[TMP42:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP40]], ptr [[TMP41]]) // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK2: .untied.jmp.7.i: @@ -2212,23 +2212,23 @@ // CHECK2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP13]], ptr align 4 [[REF_TMP_I]], i64 4, i1 false) // CHECK2-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[REF_TMP_I]]) #[[ATTR4]] // CHECK2-NEXT: store i32 10, ptr [[TMP15]], align 4 -// CHECK2-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 +// CHECK2-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, 
!noalias !113, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP44:%.*]] = call i32 @__kmpc_omp_taskwait(ptr @[[GLOB1]], i32 [[TMP43]]) -// CHECK2-NEXT: [[TMP45:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK2-NEXT: [[TMP45:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK2-NEXT: store i32 5, ptr [[TMP45]], align 4 -// CHECK2-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK2-NEXT: [[TMP47:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK2-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: [[TMP47:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK2-NEXT: [[TMP48:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP46]], ptr [[TMP47]]) // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK2: .untied.jmp.10.i: // CHECK2-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP15]]) #[[ATTR4]] -// CHECK2-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 +// CHECK2-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void @__kmpc_free(i32 [[TMP49]], ptr [[TMP15]], ptr inttoptr (i64 7 to ptr)) // CHECK2-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP13]]) #[[ATTR4]] -// CHECK2-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK2-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !113 // CHECK2-NEXT: br label [[CLEANUP_I]] // CHECK2: cleanup.i: -// CHECK2-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK2-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !113 // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK2: .omp_outlined..17.exit: // CHECK2-NEXT: ret i32 0 @@ -2294,7 +2294,7 @@ // CHECK2-NEXT: [[TMP2:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..23) // CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_22:%.*]], ptr [[TMP2]], i32 0, i32 0 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP3]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP5]], ptr align 8 [[AGG_CAPTURED]], i64 8, i1 false) // CHECK2-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP2]]) // CHECK2-NEXT: ret void @@ -2313,24 +2313,24 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_22:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META113:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META116:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META118:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META120:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 -// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !122 -// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !122 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !122 -// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !122 -// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META114:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META117:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META119:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META121:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 +// CHECK2-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !123 +// CHECK2-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !123 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !123 +// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !123 +// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store i32 0, ptr [[TMP9]], align 4 // CHECK2-NEXT: ret i32 0 // @@ -2363,7 +2363,7 @@ // CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 // CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB1]], i32 [[TMP2]]) // CHECK2-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0 // CHECK2-NEXT: br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -2373,11 +2373,11 @@ // CHECK2-NEXT: [[TMP6:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i64 48, i64 8, ptr @.omp_task_entry..27) // CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_24:%.*]], ptr [[TMP6]], i32 0, i32 0 // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef 
[[NOUNDEF3]] // CHECK2-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP9]], ptr align 8 [[AGG_CAPTURED]], i64 8, i1 false) // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_24]], ptr [[TMP6]], i32 0, i32 1 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_25:%.*]], ptr [[TMP10]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP12:%.*]] = load double, ptr [[B]], align 8 +// CHECK2-NEXT: [[TMP12:%.*]] = load double, ptr [[B]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: store double [[TMP12]], ptr [[TMP11]], align 8 // CHECK2-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP2]], ptr [[TMP6]]) // CHECK2-NEXT: call void @__kmpc_end_single(ptr @[[GLOB1]], i32 [[TMP2]]) @@ -2415,31 +2415,31 @@ // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_24:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_24]], ptr [[TMP3]], i32 0, i32 1 -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META123:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META126:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META128:![0-9]+]]) -// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META130:![0-9]+]]) -// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !132 -// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !132 -// CHECK2-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !132 -// CHECK2-NEXT: store ptr @.omp_task_privates_map..26, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !132 -// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !132 -// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !132 -// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !132 -// CHECK2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !132 -// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !132 +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META124:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META127:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META129:![0-9]+]]) +// CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META131:![0-9]+]]) +// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !133 +// CHECK2-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, 
!noalias !133 +// CHECK2-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !133 +// CHECK2-NEXT: store ptr @.omp_task_privates_map..26, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !133 +// CHECK2-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !133 +// CHECK2-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !133 +// CHECK2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !133 +// CHECK2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !133 +// CHECK2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !133 // CHECK2-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK2-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !132 -// CHECK2-NEXT: [[TMP13:%.*]] = load double, ptr [[TMP12]], align 8 +// CHECK2-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !133 +// CHECK2-NEXT: [[TMP13:%.*]] = load double, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK2-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4 +// CHECK2-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4, !noundef [[NOUNDEF3]] // CHECK2-NEXT: [[CONV_I:%.*]] = fpext float [[TMP15]] to double // CHECK2-NEXT: [[ADD_I:%.*]] = fadd double [[CONV_I]], [[TMP13]] // CHECK2-NEXT: [[CONV1_I:%.*]] = fptrunc double [[ADD_I]] to float @@ -2497,7 +2497,7 @@ // CHECK2-51-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq ptr [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] // CHECK2-51-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] // CHECK2-51: arrayctor.cont: -// CHECK2-51-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4 +// CHECK2-51-NEXT: [[TMP1:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK2-51-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK2-51-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 @@ -2508,12 +2508,12 @@ // CHECK2-51-NEXT: store ptr [[B]], ptr [[TMP5]], align 8 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 // CHECK2-51-NEXT: store ptr [[S]], ptr [[TMP6]], align 8 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load i8, ptr [[B]], align 1 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 // CHECK2-51-NEXT: [[TMP8:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 33, i64 40, i64 16, ptr @.omp_task_entry.) 
// CHECK2-51-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP8]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP9]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP11]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK2-51-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP9]], i32 0, i32 4 // CHECK2-51-NEXT: store i32 [[CONV]], ptr [[TMP12]], align 8 @@ -2523,7 +2523,7 @@ // CHECK2-51-NEXT: [[TMP15:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..2) // CHECK2-51-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP15]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP16]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 +// CHECK2-51-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP18]], ptr align 8 [[AGG_CAPTURED1]], i64 8, i1 false) // CHECK2-51-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR]], i64 0, i64 0 // CHECK2-51-NEXT: [[TMP20:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], ptr [[TMP19]], i64 0 @@ -2584,12 +2584,12 @@ // CHECK2-51-NEXT: store i64 4, ptr [[TMP56]], align 8 // CHECK2-51-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP54]], i32 0, i32 2 // CHECK2-51-NEXT: store i8 3, ptr [[TMP57]], align 8 -// CHECK2-51-NEXT: [[TMP58:%.*]] = load i8, ptr [[B]], align 1 +// CHECK2-51-NEXT: [[TMP58:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP59:%.*]] = sext i8 [[TMP58]] to i64 // CHECK2-51-NEXT: [[TMP60:%.*]] = mul nsw i64 4, [[TMP2]] // CHECK2-51-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP60]] // CHECK2-51-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX7]], i64 [[TMP59]] -// CHECK2-51-NEXT: [[TMP61:%.*]] = load i8, ptr [[B]], align 1 +// CHECK2-51-NEXT: [[TMP61:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP62:%.*]] = sext i8 [[TMP61]] to i64 // CHECK2-51-NEXT: [[TMP63:%.*]] = mul nsw i64 9, [[TMP2]] // CHECK2-51-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP63]] @@ -2622,12 +2622,12 @@ // CHECK2-51-NEXT: store i64 4, ptr [[TMP81]], align 8 // CHECK2-51-NEXT: [[TMP82:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP79]], i32 0, i32 2 // CHECK2-51-NEXT: store i8 4, ptr [[TMP82]], align 8 -// CHECK2-51-NEXT: [[TMP83:%.*]] = load i8, ptr [[B]], align 1 +// CHECK2-51-NEXT: [[TMP83:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP84:%.*]] = sext i8 [[TMP83]] to i64 // CHECK2-51-NEXT: [[TMP85:%.*]] = mul nsw i64 4, [[TMP2]] // CHECK2-51-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP85]] // CHECK2-51-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX15]], i64 [[TMP84]] -// CHECK2-51-NEXT: [[TMP86:%.*]] = load i8, ptr [[B]], align 1 +// CHECK2-51-NEXT: [[TMP86:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // 
CHECK2-51-NEXT: [[TMP87:%.*]] = sext i8 [[TMP86]] to i64 // CHECK2-51-NEXT: [[TMP88:%.*]] = mul nsw i64 9, [[TMP2]] // CHECK2-51-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP88]] @@ -2670,10 +2670,10 @@ // CHECK2-51-NEXT: [[TMP112:%.*]] = mul nsw i64 0, [[TMP2]] // CHECK2-51-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP112]] // CHECK2-51-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX23]], i64 3 -// CHECK2-51-NEXT: [[TMP113:%.*]] = load i32, ptr @a, align 4 +// CHECK2-51-NEXT: [[TMP113:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP114:%.*]] = sext i32 [[TMP113]] to i64 // CHECK2-51-NEXT: [[LEN_SUB_1:%.*]] = sub nsw i64 [[TMP114]], 1 -// CHECK2-51-NEXT: [[TMP115:%.*]] = load i32, ptr @a, align 4 +// CHECK2-51-NEXT: [[TMP115:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP116:%.*]] = sext i32 [[TMP115]] to i64 // CHECK2-51-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP116]] // CHECK2-51-NEXT: [[TMP117:%.*]] = mul nsw i64 [[LB_ADD_LEN]], [[TMP2]] @@ -2715,10 +2715,10 @@ // CHECK2-51-NEXT: [[TMP140:%.*]] = mul nsw i64 0, [[TMP2]] // CHECK2-51-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP140]] // CHECK2-51-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX31]], i64 3 -// CHECK2-51-NEXT: [[TMP141:%.*]] = load i32, ptr @a, align 4 +// CHECK2-51-NEXT: [[TMP141:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP142:%.*]] = sext i32 [[TMP141]] to i64 // CHECK2-51-NEXT: [[LEN_SUB_133:%.*]] = sub nsw i64 [[TMP142]], 1 -// CHECK2-51-NEXT: [[TMP143:%.*]] = load i32, ptr @a, align 4 +// CHECK2-51-NEXT: [[TMP143:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP144:%.*]] = sext i32 [[TMP143]] to i64 // CHECK2-51-NEXT: [[LB_ADD_LEN34:%.*]] = add nsw i64 -1, [[TMP144]] // CHECK2-51-NEXT: [[TMP145:%.*]] = mul nsw i64 [[LB_ADD_LEN34]], [[TMP2]] @@ -2747,26 +2747,26 @@ // CHECK2-51-NEXT: [[TMP161:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP159]]) // CHECK2-51-NEXT: [[TMP162:%.*]] = getelementptr inbounds [[STRUCT_ANON_16]], ptr [[AGG_CAPTURED40]], i32 0, i32 0 // CHECK2-51-NEXT: store ptr [[C]], ptr [[TMP162]], align 8 -// CHECK2-51-NEXT: [[TMP163:%.*]] = load i8, ptr [[B]], align 1 +// CHECK2-51-NEXT: [[TMP163:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP163]], 0 // CHECK2-51-NEXT: [[TMP164:%.*]] = select i1 [[TOBOOL]], i32 2, i32 0 // CHECK2-51-NEXT: [[TMP165:%.*]] = or i32 [[TMP164]], 1 // CHECK2-51-NEXT: [[TMP166:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 [[TMP165]], i64 40, i64 8, ptr @.omp_task_entry..18) // CHECK2-51-NEXT: [[TMP167:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_17:%.*]], ptr [[TMP166]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP168:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP167]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP169:%.*]] = load ptr, ptr [[TMP168]], align 8 +// CHECK2-51-NEXT: [[TMP169:%.*]] = load ptr, ptr [[TMP168]], align 8, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP169]], ptr align 8 [[AGG_CAPTURED40]], i64 8, i1 false) // CHECK2-51-NEXT: [[TMP170:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP166]]) // CHECK2-51-NEXT: [[TMP171:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], 
i32 [[TMP0]], i32 0, i64 256, i64 1, ptr @.omp_task_entry..23) // CHECK2-51-NEXT: [[TMP172:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_21:%.*]], ptr [[TMP171]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP173:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_21]], ptr [[TMP171]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP174:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_22:%.*]], ptr [[TMP173]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP175:%.*]] = load i32, ptr [[C]], align 128 +// CHECK2-51-NEXT: [[TMP175:%.*]] = load i32, ptr [[C]], align 128, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: store i32 [[TMP175]], ptr [[TMP174]], align 128 // CHECK2-51-NEXT: [[TMP176:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP172]], i32 0, i32 2 // CHECK2-51-NEXT: store i32 0, ptr [[TMP176]], align 16 // CHECK2-51-NEXT: [[TMP177:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP171]]) -// CHECK2-51-NEXT: [[TMP178:%.*]] = load i32, ptr @a, align 4 +// CHECK2-51-NEXT: [[TMP178:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: store i32 [[TMP178]], ptr [[RETVAL]], align 4 // CHECK2-51-NEXT: [[TMP179:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK2-51-NEXT: call void @llvm.stackrestore(ptr [[TMP179]]) @@ -2807,25 +2807,25 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK2-51-NEXT: store i32 15, ptr @a, align 4, !noalias !12 -// CHECK2-51-NEXT: [[TMP9:%.*]] = load i32, ptr @a, align 4, !noalias !12 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK2-51-NEXT: call void 
@llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK2-51-NEXT: store i32 15, ptr @a, align 4, !noalias !13 +// CHECK2-51-NEXT: [[TMP9:%.*]] = load i32, ptr @a, align 4, !noalias !13, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[CONV_I:%.*]] = trunc i32 [[TMP9]] to i8 // CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK2-51-NEXT: store i8 [[CONV_I]], ptr [[TMP10]], align 1 @@ -2848,24 +2848,24 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !22 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !22 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !22 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !22 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !22 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 -// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 -// CHECK2-51-NEXT: store i32 15, ptr @a, align 4, !noalias !22 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META14:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) 
+// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !23 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !23 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !23 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !23 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !23 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !23 +// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !23 +// CHECK2-51-NEXT: store i32 15, ptr @a, align 4, !noalias !23 // CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK2-51-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[TMP9]], i64 0, i64 1 // CHECK2-51-NEXT: store i32 10, ptr [[ARRAYIDX_I]], align 4 @@ -2886,48 +2886,48 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META28:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !32 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !32 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK2-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META27:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META29:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META31:![0-9]+]]) +// CHECK2-51-NEXT: 
store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !33 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !33 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !33 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK2-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK2-51-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK2-51-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK2-51-NEXT: ] // CHECK2-51: .untied.done..i: -// CHECK2-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK2-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK2-51-NEXT: br label [[CLEANUP_I:%.*]] // CHECK2-51: .untied.jmp..i: -// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 +// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 // CHECK2-51-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK2-51-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 -// CHECK2-51-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 +// CHECK2-51-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !33, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 // CHECK2-51-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP12]], ptr [[TMP13]]) // CHECK2-51-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK2-51: .untied.jmp.1.i: -// CHECK2-51-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 +// CHECK2-51-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !33, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: call void @__kmpc_critical(ptr @[[GLOB1]], i32 [[TMP15]], ptr @.gomp_critical_user_.var) -// CHECK2-51-NEXT: store i32 1, ptr @a, align 4, !noalias !32 +// CHECK2-51-NEXT: store i32 1, ptr @a, align 4, !noalias !33 // CHECK2-51-NEXT: call void @__kmpc_end_critical(ptr @[[GLOB1]], i32 [[TMP15]], ptr @.gomp_critical_user_.var) -// CHECK2-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK2-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK2-51-NEXT: br label [[CLEANUP_I]] // CHECK2-51: cleanup.i: -// CHECK2-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK2-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK2-51-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]] // CHECK2-51: .omp_outlined..3.exit: // CHECK2-51-NEXT: ret i32 0 @@ -2947,45 +2947,45 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// 
CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META33:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META36:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META38:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META40:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !42 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !42 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !42 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !42 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !42 -// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !42 -// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 -// CHECK2-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META34:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META37:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META39:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META41:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !43 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !43 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !43 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !43 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !43 +// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !43 +// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 +// CHECK2-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK2-51-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK2-51-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK2-51-NEXT: ] // CHECK2-51: .untied.done..i: -// CHECK2-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK2-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK2-51-NEXT: br label [[CLEANUP_I:%.*]] // CHECK2-51: .untied.jmp..i: -// CHECK2-51-NEXT: 
[[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 +// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 // CHECK2-51-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK2-51-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !42 -// CHECK2-51-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !42 +// CHECK2-51-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !43, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !43 // CHECK2-51-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP12]], ptr [[TMP13]]) // CHECK2-51-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT:%.*]] // CHECK2-51: .untied.jmp.1.i: -// CHECK2-51-NEXT: store i32 1, ptr @a, align 4, !noalias !42 -// CHECK2-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK2-51-NEXT: store i32 1, ptr @a, align 4, !noalias !43 +// CHECK2-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK2-51-NEXT: br label [[CLEANUP_I]] // CHECK2-51: cleanup.i: -// CHECK2-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK2-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK2-51-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT]] // CHECK2-51: .omp_outlined..5.exit: // CHECK2-51-NEXT: ret i32 0 @@ -3005,45 +3005,45 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_7:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META43:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META46:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META48:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META50:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !52 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !52 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !52 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK2-51-NEXT: [[TMP10:%.*]] = load i32, ptr 
[[TMP9]], align 4 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META44:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META47:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META49:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META51:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !53 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !53 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !53 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK2-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK2-51-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK2-51-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK2-51-NEXT: ] // CHECK2-51: .untied.done..i: -// CHECK2-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK2-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK2-51-NEXT: br label [[CLEANUP_I:%.*]] // CHECK2-51: .untied.jmp..i: -// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 +// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 // CHECK2-51-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK2-51-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !52 -// CHECK2-51-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 +// CHECK2-51-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !53, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 // CHECK2-51-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP12]], ptr [[TMP13]]) // CHECK2-51-NEXT: br label [[DOTOMP_OUTLINED__7_EXIT:%.*]] // CHECK2-51: .untied.jmp.1.i: -// CHECK2-51-NEXT: store i32 1, ptr @a, align 4, !noalias !52 -// CHECK2-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK2-51-NEXT: store i32 1, ptr @a, align 4, !noalias !53 +// CHECK2-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK2-51-NEXT: br label [[CLEANUP_I]] // CHECK2-51: cleanup.i: -// CHECK2-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK2-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK2-51-NEXT: br label [[DOTOMP_OUTLINED__7_EXIT]] // CHECK2-51: .omp_outlined..7.exit: // CHECK2-51-NEXT: ret i32 0 @@ -3062,24 +3062,24 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 
8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_9:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META53:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META56:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META58:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META60:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !62 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !62 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !62 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !62 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !62 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK2-51-NEXT: store i32 2, ptr @a, align 4, !noalias !62 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META54:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META57:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META59:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META61:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !63 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !63 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !63 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !63 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !63 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK2-51-NEXT: store i32 2, ptr @a, align 4, !noalias !63 // CHECK2-51-NEXT: ret i32 0 // // @@ -3096,24 +3096,24 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_11:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META63:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META66:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META68:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META70:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !72 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !72 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !72 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !72 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !72 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !72 -// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !72 -// CHECK2-51-NEXT: store i32 2, ptr @a, align 4, !noalias !72 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META64:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META67:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META69:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META71:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !73 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !73 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !73 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !73 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !73 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !73 +// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !73 +// CHECK2-51-NEXT: store i32 2, ptr @a, align 4, !noalias !73 // CHECK2-51-NEXT: ret i32 0 // // @@ -3130,24 +3130,24 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_13:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META73:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata 
[[META76:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META78:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META80:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !82 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !82 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !82 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !82 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !82 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !82 -// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !82 -// CHECK2-51-NEXT: store i32 2, ptr @a, align 4, !noalias !82 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META74:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META77:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META79:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META81:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !83 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !83 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !83 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !83 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !83 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !83 +// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !83 +// CHECK2-51-NEXT: store i32 2, ptr @a, align 4, !noalias !83 // CHECK2-51-NEXT: ret i32 0 // // @@ -3164,24 +3164,24 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META83:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META86:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META88:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META90:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !92 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !92 -// CHECK2-51-NEXT: 
store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !92 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !92 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !92 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !92 -// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !92 -// CHECK2-51-NEXT: store i32 3, ptr @a, align 4, !noalias !92 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META84:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META87:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META89:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META91:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !93 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !93 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !93 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !93 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !93 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !93 +// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !93 +// CHECK2-51-NEXT: store i32 3, ptr @a, align 4, !noalias !93 // CHECK2-51-NEXT: ret i32 0 // // @@ -3198,24 +3198,24 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_17:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META93:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META96:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META98:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META100:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !102 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !102 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !102 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !102 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !102 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !102 -// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, 
!noalias !102 -// CHECK2-51-NEXT: store i32 4, ptr @a, align 4, !noalias !102 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META94:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META97:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META99:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META101:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !103 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !103 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !103 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !103 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !103 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !103 +// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !103 +// CHECK2-51-NEXT: store i32 4, ptr @a, align 4, !noalias !103 // CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK2-51-NEXT: store i32 5, ptr [[TMP9]], align 128 // CHECK2-51-NEXT: ret i32 0 @@ -3249,30 +3249,30 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_20:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_20]], ptr [[TMP3]], i32 0, i32 2 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META103:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META106:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META108:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META110:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 -// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !112 -// CHECK2-51-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !112 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !112 -// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !112 -// CHECK2-51-NEXT: 
[[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !112 -// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !112 +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META104:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META107:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META109:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META111:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 +// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !113 +// CHECK2-51-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !113 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !113 +// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !113 +// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !113 +// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !113 // CHECK2-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !112 +// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !113 // CHECK2-51-NEXT: store i32 4, ptr [[TMP12]], align 128 -// CHECK2-51-NEXT: store i32 4, ptr @a, align 4, !noalias !112 +// CHECK2-51-NEXT: store i32 4, ptr @a, align 4, !noalias !113 // CHECK2-51-NEXT: ret i32 0 // // @@ -3328,33 +3328,33 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_21:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_21]], ptr [[TMP3]], i32 0, i32 2 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META113:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META116:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META118:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META120:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr 
[[DOTPART_ID__ADDR_I]], align 8, !noalias !122 -// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !122 -// CHECK2-51-NEXT: store ptr @.omp_task_privates_map..22, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !122 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !122 -// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !122 -// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !122 -// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !122 +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META114:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META117:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META119:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META121:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 +// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !123 +// CHECK2-51-NEXT: store ptr @.omp_task_privates_map..22, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !123 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !123 +// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !123 +// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !123 +// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !123 // CHECK2-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTLOCAL_PTR_ADDR_I]], ptr [[DOTLOCAL_PTR_ADDR1_I]]) #[[ATTR4]] -// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !122 -// CHECK2-51-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR_I]], align 8, !noalias !122 -// CHECK2-51-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR1_I]], align 8, !noalias !122 +// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !123 +// CHECK2-51-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR_I]], align 8, !noalias !123 +// CHECK2-51-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR1_I]], align 8, !noalias !123 // CHECK2-51-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK2-51-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 -// CHECK2-51-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP16]], align 4 +// CHECK2-51-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 +// CHECK2-51-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP16]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: switch i32 [[TMP17]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK2-51-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK2-51-NEXT: i32 1, label [[DOTUNTIED_JMP_2_I:%.*]] @@ -3364,49 +3364,49 @@ // CHECK2-51-NEXT: i32 5, label [[DOTUNTIED_JMP_10_I:%.*]] // CHECK2-51-NEXT: ] // CHECK2-51: .untied.done..i: -// CHECK2-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !122 
+// CHECK2-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !123 // CHECK2-51-NEXT: br label [[CLEANUP_I:%.*]] // CHECK2-51: .untied.jmp..i: -// CHECK2-51-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 +// CHECK2-51-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 // CHECK2-51-NEXT: store i32 1, ptr [[TMP18]], align 4 -// CHECK2-51-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 -// CHECK2-51-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 +// CHECK2-51-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 // CHECK2-51-NEXT: [[TMP21:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP19]], ptr [[TMP20]]) // CHECK2-51-NEXT: br label [[DOTOMP_OUTLINED__19_EXIT:%.*]] // CHECK2-51: .untied.jmp.2.i: // CHECK2-51-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP13]]) -// CHECK2-51-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 +// CHECK2-51-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[DOTS2__VOID_ADDR_I:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP22]], i64 4, ptr inttoptr (i64 7 to ptr)) // CHECK2-51-NEXT: store ptr [[DOTS2__VOID_ADDR_I]], ptr [[TMP14]], align 8 -// CHECK2-51-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 +// CHECK2-51-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 // CHECK2-51-NEXT: store i32 2, ptr [[TMP23]], align 4 -// CHECK2-51-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 -// CHECK2-51-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 +// CHECK2-51-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 // CHECK2-51-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP24]], ptr [[TMP25]]) // CHECK2-51-NEXT: br label [[DOTOMP_OUTLINED__19_EXIT]] // CHECK2-51: .untied.jmp.3.i: // CHECK2-51-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP15]]) // CHECK2-51-NEXT: store i32 0, ptr [[TMP15]], align 4 -// CHECK2-51-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 +// CHECK2-51-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP28:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP27]], i32 1, i64 256, i64 1, ptr @.omp_task_entry..21) // CHECK2-51-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_20:%.*]], ptr [[TMP28]], i32 0, i32 2 -// CHECK2-51-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP12]], align 128 +// CHECK2-51-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP12]], align 128, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: store i32 [[TMP30]], ptr [[TMP29]], align 128 -// CHECK2-51-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 +// CHECK2-51-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP32:%.*]] = call i32 
@__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP31]], ptr [[TMP28]]) -// CHECK2-51-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 +// CHECK2-51-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 // CHECK2-51-NEXT: store i32 3, ptr [[TMP33]], align 4 -// CHECK2-51-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 -// CHECK2-51-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 +// CHECK2-51-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: [[TMP35:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 // CHECK2-51-NEXT: [[TMP36:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP34]], ptr [[TMP35]]) // CHECK2-51-NEXT: br label [[DOTOMP_OUTLINED__19_EXIT]] // CHECK2-51: .untied.jmp.5.i: -// CHECK2-51-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 +// CHECK2-51-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP38:%.*]] = call i32 @__kmpc_omp_taskyield(ptr @[[GLOB1]], i32 [[TMP37]], i32 0) -// CHECK2-51-NEXT: [[TMP39:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 +// CHECK2-51-NEXT: [[TMP39:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 // CHECK2-51-NEXT: store i32 4, ptr [[TMP39]], align 4 -// CHECK2-51-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 -// CHECK2-51-NEXT: [[TMP41:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 +// CHECK2-51-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: [[TMP41:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 // CHECK2-51-NEXT: [[TMP42:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP40]], ptr [[TMP41]]) // CHECK2-51-NEXT: br label [[DOTOMP_OUTLINED__19_EXIT]] // CHECK2-51: .untied.jmp.7.i: @@ -3414,23 +3414,23 @@ // CHECK2-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[TMP13]], ptr align 4 [[REF_TMP_I]], i64 4, i1 false) // CHECK2-51-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[REF_TMP_I]]) #[[ATTR4]] // CHECK2-51-NEXT: store i32 10, ptr [[TMP15]], align 4 -// CHECK2-51-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 +// CHECK2-51-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP44:%.*]] = call i32 @__kmpc_omp_taskwait(ptr @[[GLOB1]], i32 [[TMP43]]) -// CHECK2-51-NEXT: [[TMP45:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 +// CHECK2-51-NEXT: [[TMP45:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 // CHECK2-51-NEXT: store i32 5, ptr [[TMP45]], align 4 -// CHECK2-51-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 -// CHECK2-51-NEXT: [[TMP47:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 +// CHECK2-51-NEXT: [[TMP46:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: [[TMP47:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 // CHECK2-51-NEXT: [[TMP48:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP46]], ptr [[TMP47]]) // CHECK2-51-NEXT: br label 
[[DOTOMP_OUTLINED__19_EXIT]] // CHECK2-51: .untied.jmp.10.i: // CHECK2-51-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP15]]) #[[ATTR4]] -// CHECK2-51-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 +// CHECK2-51-NEXT: [[TMP49:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: call void @__kmpc_free(i32 [[TMP49]], ptr [[TMP15]], ptr inttoptr (i64 7 to ptr)) // CHECK2-51-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP13]]) #[[ATTR4]] -// CHECK2-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !122 +// CHECK2-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !123 // CHECK2-51-NEXT: br label [[CLEANUP_I]] // CHECK2-51: cleanup.i: -// CHECK2-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !122 +// CHECK2-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !123 // CHECK2-51-NEXT: br label [[DOTOMP_OUTLINED__19_EXIT]] // CHECK2-51: .omp_outlined..19.exit: // CHECK2-51-NEXT: ret i32 0 @@ -3496,7 +3496,7 @@ // CHECK2-51-NEXT: [[TMP2:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..25) // CHECK2-51-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_24:%.*]], ptr [[TMP2]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP3]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK2-51-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP5]], ptr align 8 [[AGG_CAPTURED]], i64 8, i1 false) // CHECK2-51-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP2]]) // CHECK2-51-NEXT: ret void @@ -3515,24 +3515,24 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_24:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META123:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META126:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META128:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META130:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !132 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !132 -// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !132 -// CHECK2-51-NEXT: store ptr null, ptr 
[[DOTCOPY_FN__ADDR_I]], align 8, !noalias !132 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !132 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !132 -// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !132 -// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META124:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META127:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META129:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META131:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !133 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !133 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !133 +// CHECK2-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !133 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !133 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !133 +// CHECK2-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !133 +// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: store i32 0, ptr [[TMP9]], align 4 // CHECK2-51-NEXT: ret i32 0 // @@ -3565,7 +3565,7 @@ // CHECK2-51-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK2-51-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 // CHECK2-51-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_single(ptr @[[GLOB1]], i32 [[TMP2]]) // CHECK2-51-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0 // CHECK2-51-NEXT: br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]] @@ -3575,11 +3575,11 @@ // CHECK2-51-NEXT: [[TMP6:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i64 48, i64 8, ptr @.omp_task_entry..29) // CHECK2-51-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_26:%.*]], ptr [[TMP6]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP7]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP9]], ptr align 8 [[AGG_CAPTURED]], i64 8, i1 false) // CHECK2-51-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_26]], ptr [[TMP6]], i32 0, i32 1 // CHECK2-51-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_27:%.*]], ptr [[TMP10]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP12:%.*]] = load double, ptr [[B]], align 8 +// CHECK2-51-NEXT: [[TMP12:%.*]] = load double, ptr [[B]], align 8, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: store double [[TMP12]], ptr [[TMP11]], align 8 // CHECK2-51-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP2]], ptr 
[[TMP6]]) // CHECK2-51-NEXT: call void @__kmpc_end_single(ptr @[[GLOB1]], i32 [[TMP2]]) @@ -3617,31 +3617,31 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_26:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_26]], ptr [[TMP3]], i32 0, i32 1 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META133:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META136:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META138:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META140:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !142 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !142 -// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !142 -// CHECK2-51-NEXT: store ptr @.omp_task_privates_map..28, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !142 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !142 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !142 -// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !142 -// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !142 -// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !142 +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META134:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META137:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META139:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META141:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !143 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !143 +// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !143 +// CHECK2-51-NEXT: store ptr @.omp_task_privates_map..28, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !143 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !143 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !143 +// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !143 +// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !143 
+// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !143 // CHECK2-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !142 -// CHECK2-51-NEXT: [[TMP13:%.*]] = load double, ptr [[TMP12]], align 8 +// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !143 +// CHECK2-51-NEXT: [[TMP13:%.*]] = load double, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK2-51-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4 +// CHECK2-51-NEXT: [[TMP15:%.*]] = load float, ptr [[TMP14]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[CONV_I:%.*]] = fpext float [[TMP15]] to double // CHECK2-51-NEXT: [[ADD_I:%.*]] = fadd double [[CONV_I]], [[TMP13]] // CHECK2-51-NEXT: [[CONV1_I:%.*]] = fptrunc double [[ADD_I]] to float @@ -3677,7 +3677,7 @@ // CHECK2-51-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_29:%.*]], ptr [[TMP1]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_29]], ptr [[TMP1]], i32 0, i32 1 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_30:%.*]], ptr [[TMP3]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4 +// CHECK2-51-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: store i32 [[TMP5]], ptr [[TMP4]], align 8 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR]], i64 0, i64 0 // CHECK2-51-NEXT: [[TMP7:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], ptr [[TMP6]], i64 0 @@ -3701,7 +3701,7 @@ // CHECK2-51-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_32:%.*]], ptr [[TMP17]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_32]], ptr [[TMP17]], i32 0, i32 1 // CHECK2-51-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_33:%.*]], ptr [[TMP19]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP21:%.*]] = load i32, ptr [[A]], align 4 +// CHECK2-51-NEXT: [[TMP21:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: store i32 [[TMP21]], ptr [[TMP20]], align 8 // CHECK2-51-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR2]], i64 0, i64 0 // CHECK2-51-NEXT: [[TMP23:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP22]], i64 0 @@ -3725,7 +3725,7 @@ // CHECK2-51-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_35:%.*]], ptr [[TMP33]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_35]], ptr [[TMP33]], i32 0, i32 1 // CHECK2-51-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_36:%.*]], ptr [[TMP35]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP37:%.*]] = load i32, ptr [[A]], align 4 +// CHECK2-51-NEXT: [[TMP37:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: store i32 [[TMP37]], ptr [[TMP36]], align 8 // CHECK2-51-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR5]], i64 0, i64 0 // CHECK2-51-NEXT: [[TMP39:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP38]], i64 0 @@ -3749,7 +3749,7 @@ // 
CHECK2-51-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_38:%.*]], ptr [[TMP49]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_38]], ptr [[TMP49]], i32 0, i32 1 // CHECK2-51-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_39:%.*]], ptr [[TMP51]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP53:%.*]] = load i32, ptr [[A]], align 4 +// CHECK2-51-NEXT: [[TMP53:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: store i32 [[TMP53]], ptr [[TMP52]], align 8 // CHECK2-51-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR8]], i64 0, i64 0 // CHECK2-51-NEXT: [[TMP55:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP54]], i64 0 @@ -3773,7 +3773,7 @@ // CHECK2-51-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_41:%.*]], ptr [[TMP65]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_41]], ptr [[TMP65]], i32 0, i32 1 // CHECK2-51-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_42:%.*]], ptr [[TMP67]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP69:%.*]] = load i32, ptr [[A]], align 4 +// CHECK2-51-NEXT: [[TMP69:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: store i32 [[TMP69]], ptr [[TMP68]], align 8 // CHECK2-51-NEXT: [[TMP70:%.*]] = getelementptr inbounds [1 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR11]], i64 0, i64 0 // CHECK2-51-NEXT: [[TMP71:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP70]], i64 0 @@ -3816,28 +3816,28 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_29:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_29]], ptr [[TMP3]], i32 0, i32 1 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META145:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META148:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META150:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META152:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !154 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !154 -// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !154 -// CHECK2-51-NEXT: store ptr @.omp_task_privates_map..31, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !154 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr 
[[DOTTASK_T__ADDR_I]], align 8, !noalias !154 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !154 -// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !154 -// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !154 -// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !154 +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META146:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META149:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META151:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META153:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !155 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !155 +// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !155 +// CHECK2-51-NEXT: store ptr @.omp_task_privates_map..31, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !155 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !155 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !155 +// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !155 +// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !155 +// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !155 // CHECK2-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !154 +// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !155 // CHECK2-51-NEXT: store i32 13, ptr [[TMP12]], align 4 // CHECK2-51-NEXT: ret i32 0 // @@ -3870,28 +3870,28 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_32:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_32]], ptr [[TMP3]], i32 0, i32 1 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META155:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META158:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META160:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META162:![0-9]+]]) -// CHECK2-51-NEXT: store i32 
[[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !164 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !164 -// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !164 -// CHECK2-51-NEXT: store ptr @.omp_task_privates_map..34, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !164 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !164 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !164 -// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !164 -// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !164 -// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !164 +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META156:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META159:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META161:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META163:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !165 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !165 +// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !165 +// CHECK2-51-NEXT: store ptr @.omp_task_privates_map..34, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !165 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !165 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !165 +// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !165 +// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !165 +// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !165 // CHECK2-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !164 +// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !165 // CHECK2-51-NEXT: store i32 14, ptr [[TMP12]], align 4 // CHECK2-51-NEXT: ret i32 0 // @@ -3924,28 +3924,28 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_35:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_35]], ptr [[TMP3]], i32 0, i32 1 -// CHECK2-51-NEXT: 
call void @llvm.experimental.noalias.scope.decl(metadata [[META165:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META168:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META170:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META172:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !174 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !174 -// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !174 -// CHECK2-51-NEXT: store ptr @.omp_task_privates_map..37, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !174 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !174 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !174 -// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !174 -// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !174 -// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !174 +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META166:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META169:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META171:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META173:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !175 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !175 +// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !175 +// CHECK2-51-NEXT: store ptr @.omp_task_privates_map..37, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !175 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !175 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !175 +// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !175 +// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !175 +// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !175 // CHECK2-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !174 +// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !175 // CHECK2-51-NEXT: store i32 15, ptr [[TMP12]], align 4 // CHECK2-51-NEXT: ret i32 0 // @@ -3978,28 +3978,28 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_38:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // 
CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_38]], ptr [[TMP3]], i32 0, i32 1 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META175:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META178:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META180:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META182:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !184 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !184 -// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !184 -// CHECK2-51-NEXT: store ptr @.omp_task_privates_map..40, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !184 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !184 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !184 -// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !184 -// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !184 -// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !184 +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META176:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META179:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META181:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META183:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !185 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !185 +// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !185 +// CHECK2-51-NEXT: store ptr @.omp_task_privates_map..40, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !185 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !185 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !185 +// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !185 +// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !185 +// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !185 // CHECK2-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !184 +// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !185 // CHECK2-51-NEXT: store i32 16, ptr [[TMP12]], align 4 // CHECK2-51-NEXT: ret i32 0 // @@ -4032,28 +4032,28 @@ // CHECK2-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK2-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK2-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// 
CHECK2-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK2-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_41:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK2-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK2-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK2-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK2-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_41]], ptr [[TMP3]], i32 0, i32 1 -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META185:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META188:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META190:![0-9]+]]) -// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META192:![0-9]+]]) -// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !194 -// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !194 -// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !194 -// CHECK2-51-NEXT: store ptr @.omp_task_privates_map..43, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !194 -// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !194 -// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !194 -// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !194 -// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !194 -// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !194 +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META186:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META189:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META191:![0-9]+]]) +// CHECK2-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META193:![0-9]+]]) +// CHECK2-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !195 +// CHECK2-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !195 +// CHECK2-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !195 +// CHECK2-51-NEXT: store ptr @.omp_task_privates_map..43, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !195 +// CHECK2-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !195 +// CHECK2-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !195 +// CHECK2-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !195 +// CHECK2-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !195 +// CHECK2-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !195 // CHECK2-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !194 +// CHECK2-51-NEXT: [[TMP12:%.*]] = load ptr, ptr 
[[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !195 // CHECK2-51-NEXT: store i32 17, ptr [[TMP12]], align 4 // CHECK2-51-NEXT: ret i32 0 // @@ -4104,7 +4104,7 @@ // CHECK3-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq ptr [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] // CHECK3-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] // CHECK3: arrayctor.cont: -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK3-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 @@ -4115,13 +4115,13 @@ // CHECK3-NEXT: store ptr [[B]], ptr [[TMP4]], align 8 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 // CHECK3-NEXT: store ptr [[S]], ptr [[TMP5]], align 8 -// CHECK3-NEXT: [[TMP6:%.*]] = load i8, ptr [[B]], align 1 +// CHECK3-NEXT: [[TMP6:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV:%.*]] = sext i8 [[TMP6]] to i32 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]]) // CHECK3-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 33, i64 40, i64 16, ptr @.omp_task_entry.) // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP7]], i32 0, i32 0 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP8]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 +// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP10]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP8]], i32 0, i32 4 // CHECK3-NEXT: store i32 [[CONV]], ptr [[TMP11]], align 8 @@ -4133,7 +4133,7 @@ // CHECK3-NEXT: [[TMP14:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..2) // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP14]], i32 0, i32 0 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP15]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 +// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP17]], ptr align 8 [[AGG_CAPTURED2]], i64 8, i1 false) // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR]], i64 0, i64 0 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], ptr [[TMP18]], i64 0 @@ -4198,12 +4198,12 @@ // CHECK3-NEXT: store i64 4, ptr [[TMP55]], align 8 // CHECK3-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP53]], i32 0, i32 2 // CHECK3-NEXT: store i8 3, ptr [[TMP56]], align 8 -// CHECK3-NEXT: [[TMP57:%.*]] = load i8, ptr [[B]], align 1 +// CHECK3-NEXT: [[TMP57:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP58:%.*]] = sext i8 [[TMP57]] to i64 // CHECK3-NEXT: [[TMP59:%.*]] = mul nsw i64 4, [[TMP1]] // CHECK3-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, ptr 
[[VLA]], i64 [[TMP59]] // CHECK3-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX13]], i64 [[TMP58]] -// CHECK3-NEXT: [[TMP60:%.*]] = load i8, ptr [[B]], align 1 +// CHECK3-NEXT: [[TMP60:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP61:%.*]] = sext i8 [[TMP60]] to i64 // CHECK3-NEXT: [[TMP62:%.*]] = mul nsw i64 9, [[TMP1]] // CHECK3-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP62]] @@ -4238,12 +4238,12 @@ // CHECK3-NEXT: store i64 4, ptr [[TMP80]], align 8 // CHECK3-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP78]], i32 0, i32 2 // CHECK3-NEXT: store i8 4, ptr [[TMP81]], align 8 -// CHECK3-NEXT: [[TMP82:%.*]] = load i8, ptr [[B]], align 1 +// CHECK3-NEXT: [[TMP82:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP83:%.*]] = sext i8 [[TMP82]] to i64 // CHECK3-NEXT: [[TMP84:%.*]] = mul nsw i64 4, [[TMP1]] // CHECK3-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP84]] // CHECK3-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX23]], i64 [[TMP83]] -// CHECK3-NEXT: [[TMP85:%.*]] = load i8, ptr [[B]], align 1 +// CHECK3-NEXT: [[TMP85:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP86:%.*]] = sext i8 [[TMP85]] to i64 // CHECK3-NEXT: [[TMP87:%.*]] = mul nsw i64 9, [[TMP1]] // CHECK3-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP87]] @@ -4288,10 +4288,10 @@ // CHECK3-NEXT: [[TMP111:%.*]] = mul nsw i64 0, [[TMP1]] // CHECK3-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP111]] // CHECK3-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX33]], i64 3 -// CHECK3-NEXT: [[TMP112:%.*]] = load i32, ptr @a, align 4 +// CHECK3-NEXT: [[TMP112:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP113:%.*]] = sext i32 [[TMP112]] to i64 // CHECK3-NEXT: [[LEN_SUB_1:%.*]] = sub nsw i64 [[TMP113]], 1 -// CHECK3-NEXT: [[TMP114:%.*]] = load i32, ptr @a, align 4 +// CHECK3-NEXT: [[TMP114:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP115:%.*]] = sext i32 [[TMP114]] to i64 // CHECK3-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP115]] // CHECK3-NEXT: [[TMP116:%.*]] = mul nsw i64 [[LB_ADD_LEN]], [[TMP1]] @@ -4325,7 +4325,7 @@ // CHECK3-NEXT: [[TMP132:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM44]], ptr [[TMP130]]) // CHECK3-NEXT: [[TMP133:%.*]] = getelementptr inbounds [[STRUCT_ANON_14]], ptr [[AGG_CAPTURED45]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[C]], ptr [[TMP133]], align 8 -// CHECK3-NEXT: [[TMP134:%.*]] = load i8, ptr [[B]], align 1 +// CHECK3-NEXT: [[TMP134:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP134]], 0 // CHECK3-NEXT: [[TMP135:%.*]] = select i1 [[TOBOOL]], i32 2, i32 0 // CHECK3-NEXT: [[TMP136:%.*]] = or i32 [[TMP135]], 1 @@ -4333,7 +4333,7 @@ // CHECK3-NEXT: [[TMP137:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM46]], i32 [[TMP136]], i64 40, i64 8, ptr @.omp_task_entry..16) // CHECK3-NEXT: [[TMP138:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP137]], i32 0, i32 0 // CHECK3-NEXT: [[TMP139:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP138]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP140:%.*]] = load ptr, ptr [[TMP139]], align 8 +// CHECK3-NEXT: [[TMP140:%.*]] = 
load ptr, ptr [[TMP139]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP140]], ptr align 8 [[AGG_CAPTURED45]], i64 8, i1 false) // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM47:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB19]]) // CHECK3-NEXT: [[TMP141:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM47]], ptr [[TMP137]]) @@ -4342,13 +4342,13 @@ // CHECK3-NEXT: [[TMP143:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19:%.*]], ptr [[TMP142]], i32 0, i32 0 // CHECK3-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19]], ptr [[TMP142]], i32 0, i32 2 // CHECK3-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_20:%.*]], ptr [[TMP144]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP146:%.*]] = load i32, ptr [[C]], align 128 +// CHECK3-NEXT: [[TMP146:%.*]] = load i32, ptr [[C]], align 128, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP146]], ptr [[TMP145]], align 128 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM50:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) // CHECK3-NEXT: [[TMP147:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP143]], i32 0, i32 2 // CHECK3-NEXT: store i32 0, ptr [[TMP147]], align 16 // CHECK3-NEXT: [[TMP148:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM50]], ptr [[TMP142]]) -// CHECK3-NEXT: [[TMP149:%.*]] = load i32, ptr @a, align 4 +// CHECK3-NEXT: [[TMP149:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP149]], ptr [[RETVAL]], align 4 // CHECK3-NEXT: [[TMP150:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK3-NEXT: call void @llvm.stackrestore(ptr [[TMP150]]) @@ -4389,25 +4389,25 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// 
CHECK3-NEXT: store i32 15, ptr @a, align 4, !noalias !12 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr @a, align 4, !noalias !12 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: store i32 15, ptr @a, align 4, !noalias !13 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr @a, align 4, !noalias !13, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[CONV_I:%.*]] = trunc i32 [[TMP9]] to i8 // CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK3-NEXT: store i8 [[CONV_I]], ptr [[TMP10]], align 1 @@ -4430,24 +4430,24 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !22 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !22 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !22 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !22 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !22 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 -// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 -// CHECK3-NEXT: store i32 15, ptr @a, align 4, !noalias !22 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void 
@llvm.experimental.noalias.scope.decl(metadata [[META14:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !23 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !23 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !23 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !23 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !23 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !23 +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !23 +// CHECK3-NEXT: store i32 15, ptr @a, align 4, !noalias !23 // CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK3-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[TMP9]], i64 0, i64 1 // CHECK3-NEXT: store i32 10, ptr [[ARRAYIDX_I]], align 4 @@ -4468,48 +4468,48 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META28:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !32 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !32 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META27:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata 
[[META29:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META31:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !33 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !33 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !33 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK3-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK3-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK3-NEXT: ] // CHECK3: .untied.done..i: -// CHECK3-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK3-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK3-NEXT: br label [[CLEANUP_I:%.*]] // CHECK3: .untied.jmp..i: -// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 +// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 // CHECK3-NEXT: store i32 1, ptr [[TMP11]], align 4 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 // CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP12]]) // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK3: .untied.jmp.1.i: // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM2_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK3-NEXT: call void @__kmpc_critical(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2_I]], ptr @.gomp_critical_user_.var) -// CHECK3-NEXT: store i32 1, ptr @a, align 4, !noalias !32 +// CHECK3-NEXT: store i32 1, ptr @a, align 4, !noalias !33 // CHECK3-NEXT: call void @__kmpc_end_critical(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2_I]], ptr @.gomp_critical_user_.var) -// CHECK3-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK3-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK3-NEXT: br label [[CLEANUP_I]] // CHECK3: cleanup.i: -// CHECK3-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK3-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]] // CHECK3: .omp_outlined..3.exit: // CHECK3-NEXT: ret i32 0 @@ -4529,45 +4529,45 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: 
[[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META33:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META36:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META38:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META40:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !42 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !42 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !42 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !42 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !42 -// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !42 -// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META34:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META37:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META39:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META41:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !43 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !43 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !43 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !43 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !43 +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !43 +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK3-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK3-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK3-NEXT: ] // CHECK3: .untied.done..i: -// CHECK3-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK3-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK3-NEXT: br label [[CLEANUP_I:%.*]] // CHECK3: .untied.jmp..i: -// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 +// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 // CHECK3-NEXT: store i32 1, ptr [[TMP11]], align 4 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 
@__kmpc_global_thread_num(ptr @[[GLOB9]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !42 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !43 // CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP12]]) // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT:%.*]] // CHECK3: .untied.jmp.1.i: -// CHECK3-NEXT: store i32 1, ptr @a, align 4, !noalias !42 -// CHECK3-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK3-NEXT: store i32 1, ptr @a, align 4, !noalias !43 +// CHECK3-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK3-NEXT: br label [[CLEANUP_I]] // CHECK3: cleanup.i: -// CHECK3-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK3-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT]] // CHECK3: .omp_outlined..5.exit: // CHECK3-NEXT: ret i32 0 @@ -4587,45 +4587,45 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_7:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META43:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META46:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META48:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META50:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !52 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !52 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !52 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META44:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META47:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META49:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META51:![0-9]+]]) +// CHECK3-NEXT: store i32 
[[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !53 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !53 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !53 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK3-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK3-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK3-NEXT: ] // CHECK3: .untied.done..i: -// CHECK3-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK3-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK3-NEXT: br label [[CLEANUP_I:%.*]] // CHECK3: .untied.jmp..i: -// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 +// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 // CHECK3-NEXT: store i32 1, ptr [[TMP11]], align 4 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB11]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 // CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP12]]) // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__7_EXIT:%.*]] // CHECK3: .untied.jmp.1.i: -// CHECK3-NEXT: store i32 1, ptr @a, align 4, !noalias !52 -// CHECK3-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK3-NEXT: store i32 1, ptr @a, align 4, !noalias !53 +// CHECK3-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK3-NEXT: br label [[CLEANUP_I]] // CHECK3: cleanup.i: -// CHECK3-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK3-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__7_EXIT]] // CHECK3: .omp_outlined..7.exit: // CHECK3-NEXT: ret i32 0 @@ -4644,24 +4644,24 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_9:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META53:![0-9]+]]) -// CHECK3-NEXT: call 
void @llvm.experimental.noalias.scope.decl(metadata [[META56:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META58:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META60:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !62 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !62 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !62 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !62 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !62 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK3-NEXT: store i32 2, ptr @a, align 4, !noalias !62 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META54:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META57:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META59:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META61:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !63 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !63 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !63 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !63 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !63 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK3-NEXT: store i32 2, ptr @a, align 4, !noalias !63 // CHECK3-NEXT: ret i32 0 // // @@ -4678,24 +4678,24 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_11:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META63:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META66:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META68:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META70:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !72 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !72 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !72 -// 
CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !72 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !72 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !72 -// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !72 -// CHECK3-NEXT: store i32 2, ptr @a, align 4, !noalias !72 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META64:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META67:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META69:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META71:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !73 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !73 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !73 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !73 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !73 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !73 +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !73 +// CHECK3-NEXT: store i32 2, ptr @a, align 4, !noalias !73 // CHECK3-NEXT: ret i32 0 // // @@ -4712,24 +4712,24 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_13:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META73:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META76:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META78:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META80:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !82 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !82 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !82 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !82 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !82 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !82 -// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !82 -// CHECK3-NEXT: store i32 3, ptr @a, align 4, !noalias !82 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void 
@llvm.experimental.noalias.scope.decl(metadata [[META74:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META77:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META79:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META81:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !83 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !83 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !83 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !83 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !83 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !83 +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !83 +// CHECK3-NEXT: store i32 3, ptr @a, align 4, !noalias !83 // CHECK3-NEXT: ret i32 0 // // @@ -4746,24 +4746,24 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META83:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META86:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META88:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META90:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !92 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !92 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !92 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !92 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !92 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !92 -// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !92 -// CHECK3-NEXT: store i32 4, ptr @a, align 4, !noalias !92 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META84:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META87:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META89:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META91:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !93 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !93 
+// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !93 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !93 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !93 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !93 +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !93 +// CHECK3-NEXT: store i32 4, ptr @a, align 4, !noalias !93 // CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK3-NEXT: store i32 5, ptr [[TMP9]], align 128 // CHECK3-NEXT: ret i32 0 @@ -4797,30 +4797,30 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_18:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_18]], ptr [[TMP3]], i32 0, i32 2 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META93:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META96:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META98:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META100:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !102 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !102 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !102 -// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !102 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !102 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !102 -// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !102 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !102 -// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !102 +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META94:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META97:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META99:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META101:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !103 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !103 +// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !103 +// CHECK3-NEXT: store ptr 
@.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !103 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !103 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !103 +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !103 +// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !103 +// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !103 // CHECK3-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !102 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !103 // CHECK3-NEXT: store i32 4, ptr [[TMP12]], align 128 -// CHECK3-NEXT: store i32 4, ptr @a, align 4, !noalias !102 +// CHECK3-NEXT: store i32 4, ptr @a, align 4, !noalias !103 // CHECK3-NEXT: ret i32 0 // // @@ -4878,32 +4878,32 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19]], ptr [[TMP3]], i32 0, i32 2 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META103:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META106:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META108:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META110:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !112 -// CHECK3-NEXT: store ptr @.omp_task_privates_map..20, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !112 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !112 -// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !112 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !112 -// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !112 +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META104:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META107:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata 
[[META109:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META111:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 +// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !113 +// CHECK3-NEXT: store ptr @.omp_task_privates_map..20, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !113 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !113 +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !113 +// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !113 +// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !113 // CHECK3-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTLOCAL_PTR_ADDR_I]], ptr [[DOTLOCAL_PTR_ADDR1_I]]) #[[ATTR4]] -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !112 -// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR_I]], align 8, !noalias !112 -// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR1_I]], align 8, !noalias !112 -// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !113 +// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR_I]], align 8, !noalias !113 +// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR1_I]], align 8, !noalias !113 +// CHECK3-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: switch i32 [[TMP16]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK3-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK3-NEXT: i32 1, label [[DOTUNTIED_JMP_2_I:%.*]] @@ -4912,61 +4912,61 @@ // CHECK3-NEXT: i32 4, label [[DOTUNTIED_JMP_15_I:%.*]] // CHECK3-NEXT: ] // CHECK3: .untied.done..i: -// CHECK3-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK3-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !113 // CHECK3-NEXT: br label [[CLEANUP_I:%.*]] // CHECK3: .untied.jmp..i: -// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK3-NEXT: store i32 1, ptr [[TMP17]], align 4 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) -// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK3-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP18]]) // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT:%.*]] // CHECK3: .untied.jmp.2.i: // CHECK3-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S1_I]]) // CHECK3-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S2_I]]) -// CHECK3-NEXT: store i32 0, ptr [[S2_I]], align 4, !noalias !112 
+// CHECK3-NEXT: store i32 0, ptr [[S2_I]], align 4, !noalias !113 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM3_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB23:[0-9]+]]) // CHECK3-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3_I]], i32 1, i64 256, i64 1, ptr @.omp_task_entry..19) // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_18:%.*]], ptr [[TMP20]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP12]], align 128 +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP12]], align 128, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 [[TMP22]], ptr [[TMP21]], align 128 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM4_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB23]]) // CHECK3-NEXT: [[TMP23:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM4_I]], ptr [[TMP20]]) -// CHECK3-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK3-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK3-NEXT: store i32 2, ptr [[TMP24]], align 4 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM5_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) -// CHECK3-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK3-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK3-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM5_I]], ptr [[TMP25]]) // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK3: .untied.jmp.6.i: // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM8_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK3-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_omp_taskyield(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM8_I]], i32 0) -// CHECK3-NEXT: [[TMP28:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK3-NEXT: [[TMP28:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK3-NEXT: store i32 3, ptr [[TMP28]], align 4 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM9_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) -// CHECK3-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK3-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK3-NEXT: [[TMP30:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM9_I]], ptr [[TMP29]]) // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK3: .untied.jmp.10.i: // CHECK3-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[REF_TMP_I]]) -// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[S1_I]], ptr align 4 [[REF_TMP_I]], i64 4, i1 false), !noalias !112 +// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[S1_I]], ptr align 4 [[REF_TMP_I]], i64 4, i1 false), !noalias !113 // CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[REF_TMP_I]]) #[[ATTR4]] -// CHECK3-NEXT: store i32 10, ptr [[S2_I]], align 4, !noalias !112 +// CHECK3-NEXT: store i32 10, ptr [[S2_I]], align 4, !noalias !113 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM13_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK3-NEXT: [[TMP31:%.*]] = call i32 @__kmpc_omp_taskwait(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM13_I]]) -// CHECK3-NEXT: [[TMP32:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK3-NEXT: [[TMP32:%.*]] = 
load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK3-NEXT: store i32 4, ptr [[TMP32]], align 4 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM14_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) -// CHECK3-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK3-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK3-NEXT: [[TMP34:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM14_I]], ptr [[TMP33]]) // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK3: .untied.jmp.15.i: // CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S2_I]]) #[[ATTR4]] // CHECK3-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S1_I]]) #[[ATTR4]] -// CHECK3-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK3-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !113 // CHECK3-NEXT: br label [[CLEANUP_I]] // CHECK3: cleanup.i: -// CHECK3-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK3-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !113 // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK3: .omp_outlined..17.exit: // CHECK3-NEXT: ret i32 0 @@ -5032,7 +5032,7 @@ // CHECK3-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..23) // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_22:%.*]], ptr [[TMP1]], i32 0, i32 0 // CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 8 +// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP4]], ptr align 8 [[AGG_CAPTURED]], i64 8, i1 false) // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB25]]) // CHECK3-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2]], ptr [[TMP1]]) @@ -5052,24 +5052,24 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_22:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META113:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META116:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META118:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META120:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, 
!noalias !122 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 -// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !122 -// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !122 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !122 -// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !122 -// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META114:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META117:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META119:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META121:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 +// CHECK3-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !123 +// CHECK3-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !123 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !123 +// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !123 +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK3-NEXT: store i32 0, ptr [[TMP9]], align 4 // CHECK3-NEXT: ret i32 0 // @@ -5120,7 +5120,7 @@ // CHECK4-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq ptr [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] // CHECK4-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] // CHECK4: arrayctor.cont: -// CHECK4-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4 +// CHECK4-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK4-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK4-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK4-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 @@ -5131,13 +5131,13 @@ // CHECK4-NEXT: store ptr [[B]], ptr [[TMP4]], align 8 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 // CHECK4-NEXT: store ptr [[S]], ptr [[TMP5]], align 8 -// CHECK4-NEXT: [[TMP6:%.*]] = load i8, ptr [[B]], align 1 +// CHECK4-NEXT: [[TMP6:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[CONV:%.*]] = sext i8 [[TMP6]] to i32 // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]]) // CHECK4-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 33, i64 40, i64 16, ptr @.omp_task_entry.) 
// CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP7]], i32 0, i32 0 // CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP8]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 +// CHECK4-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP10]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP8]], i32 0, i32 4 // CHECK4-NEXT: store i32 [[CONV]], ptr [[TMP11]], align 8 @@ -5149,7 +5149,7 @@ // CHECK4-NEXT: [[TMP14:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..2) // CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP14]], i32 0, i32 0 // CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP15]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 +// CHECK4-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP17]], ptr align 8 [[AGG_CAPTURED2]], i64 8, i1 false) // CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR]], i64 0, i64 0 // CHECK4-NEXT: [[TMP19:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], ptr [[TMP18]], i64 0 @@ -5214,12 +5214,12 @@ // CHECK4-NEXT: store i64 4, ptr [[TMP55]], align 8 // CHECK4-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP53]], i32 0, i32 2 // CHECK4-NEXT: store i8 3, ptr [[TMP56]], align 8 -// CHECK4-NEXT: [[TMP57:%.*]] = load i8, ptr [[B]], align 1 +// CHECK4-NEXT: [[TMP57:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP58:%.*]] = sext i8 [[TMP57]] to i64 // CHECK4-NEXT: [[TMP59:%.*]] = mul nsw i64 4, [[TMP1]] // CHECK4-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP59]] // CHECK4-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX13]], i64 [[TMP58]] -// CHECK4-NEXT: [[TMP60:%.*]] = load i8, ptr [[B]], align 1 +// CHECK4-NEXT: [[TMP60:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP61:%.*]] = sext i8 [[TMP60]] to i64 // CHECK4-NEXT: [[TMP62:%.*]] = mul nsw i64 9, [[TMP1]] // CHECK4-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP62]] @@ -5254,12 +5254,12 @@ // CHECK4-NEXT: store i64 4, ptr [[TMP80]], align 8 // CHECK4-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP78]], i32 0, i32 2 // CHECK4-NEXT: store i8 4, ptr [[TMP81]], align 8 -// CHECK4-NEXT: [[TMP82:%.*]] = load i8, ptr [[B]], align 1 +// CHECK4-NEXT: [[TMP82:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP83:%.*]] = sext i8 [[TMP82]] to i64 // CHECK4-NEXT: [[TMP84:%.*]] = mul nsw i64 4, [[TMP1]] // CHECK4-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP84]] // CHECK4-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX23]], i64 [[TMP83]] -// CHECK4-NEXT: [[TMP85:%.*]] = load i8, ptr [[B]], align 1 +// CHECK4-NEXT: [[TMP85:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP86:%.*]] = sext i8 [[TMP85]] to i64 // CHECK4-NEXT: [[TMP87:%.*]] = mul nsw i64 9, 
[[TMP1]] // CHECK4-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP87]] @@ -5304,10 +5304,10 @@ // CHECK4-NEXT: [[TMP111:%.*]] = mul nsw i64 0, [[TMP1]] // CHECK4-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP111]] // CHECK4-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX33]], i64 3 -// CHECK4-NEXT: [[TMP112:%.*]] = load i32, ptr @a, align 4 +// CHECK4-NEXT: [[TMP112:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP113:%.*]] = sext i32 [[TMP112]] to i64 // CHECK4-NEXT: [[LEN_SUB_1:%.*]] = sub nsw i64 [[TMP113]], 1 -// CHECK4-NEXT: [[TMP114:%.*]] = load i32, ptr @a, align 4 +// CHECK4-NEXT: [[TMP114:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP115:%.*]] = sext i32 [[TMP114]] to i64 // CHECK4-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP115]] // CHECK4-NEXT: [[TMP116:%.*]] = mul nsw i64 [[LB_ADD_LEN]], [[TMP1]] @@ -5341,7 +5341,7 @@ // CHECK4-NEXT: [[TMP132:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM44]], ptr [[TMP130]]) // CHECK4-NEXT: [[TMP133:%.*]] = getelementptr inbounds [[STRUCT_ANON_14]], ptr [[AGG_CAPTURED45]], i32 0, i32 0 // CHECK4-NEXT: store ptr [[C]], ptr [[TMP133]], align 8 -// CHECK4-NEXT: [[TMP134:%.*]] = load i8, ptr [[B]], align 1 +// CHECK4-NEXT: [[TMP134:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP134]], 0 // CHECK4-NEXT: [[TMP135:%.*]] = select i1 [[TOBOOL]], i32 2, i32 0 // CHECK4-NEXT: [[TMP136:%.*]] = or i32 [[TMP135]], 1 @@ -5349,7 +5349,7 @@ // CHECK4-NEXT: [[TMP137:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM46]], i32 [[TMP136]], i64 40, i64 8, ptr @.omp_task_entry..16) // CHECK4-NEXT: [[TMP138:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP137]], i32 0, i32 0 // CHECK4-NEXT: [[TMP139:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP138]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP140:%.*]] = load ptr, ptr [[TMP139]], align 8 +// CHECK4-NEXT: [[TMP140:%.*]] = load ptr, ptr [[TMP139]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP140]], ptr align 8 [[AGG_CAPTURED45]], i64 8, i1 false) // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM47:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB19]]) // CHECK4-NEXT: [[TMP141:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM47]], ptr [[TMP137]]) @@ -5358,13 +5358,13 @@ // CHECK4-NEXT: [[TMP143:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19:%.*]], ptr [[TMP142]], i32 0, i32 0 // CHECK4-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19]], ptr [[TMP142]], i32 0, i32 2 // CHECK4-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_20:%.*]], ptr [[TMP144]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP146:%.*]] = load i32, ptr [[C]], align 128 +// CHECK4-NEXT: [[TMP146:%.*]] = load i32, ptr [[C]], align 128, !noundef [[NOUNDEF3]] // CHECK4-NEXT: store i32 [[TMP146]], ptr [[TMP145]], align 128 // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM50:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) // CHECK4-NEXT: [[TMP147:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP143]], i32 0, i32 2 // CHECK4-NEXT: store i32 0, ptr [[TMP147]], align 16 // CHECK4-NEXT: [[TMP148:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM50]], ptr [[TMP142]]) -// 
CHECK4-NEXT: [[TMP149:%.*]] = load i32, ptr @a, align 4 +// CHECK4-NEXT: [[TMP149:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: store i32 [[TMP149]], ptr [[RETVAL]], align 4 // CHECK4-NEXT: [[TMP150:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK4-NEXT: call void @llvm.stackrestore(ptr [[TMP150]]) @@ -5405,25 +5405,25 @@ // CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK4-NEXT: store i32 15, ptr @a, align 4, !noalias !12 -// CHECK4-NEXT: [[TMP9:%.*]] = load i32, ptr @a, align 4, !noalias !12 +// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK4-NEXT: store i32 15, ptr @a, align 4, !noalias !13 +// CHECK4-NEXT: [[TMP9:%.*]] = load i32, ptr @a, align 4, !noalias !13, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[CONV_I:%.*]] = trunc i32 [[TMP9]] to i8 // 
CHECK4-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK4-NEXT: store i8 [[CONV_I]], ptr [[TMP10]], align 1 @@ -5446,24 +5446,24 @@ // CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !22 -// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !22 -// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !22 -// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !22 -// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !22 -// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 -// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 -// CHECK4-NEXT: store i32 15, ptr @a, align 4, !noalias !22 +// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META14:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !23 +// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !23 +// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !23 +// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !23 +// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !23 +// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !23 +// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !23 +// CHECK4-NEXT: store i32 15, ptr @a, align 4, !noalias !23 // CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK4-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[TMP9]], i64 0, i64 1 // CHECK4-NEXT: store i32 10, ptr [[ARRAYIDX_I]], align 4 @@ -5484,48 +5484,48 @@ // CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// 
CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META28:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]]) -// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 -// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !32 -// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !32 -// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 -// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK4-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META27:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META29:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META31:![0-9]+]]) +// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !33 +// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !33 +// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !33 +// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 +// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK4-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK4-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK4-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK4-NEXT: ] // CHECK4: .untied.done..i: -// CHECK4-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK4-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK4-NEXT: br label [[CLEANUP_I:%.*]] // CHECK4: .untied.jmp..i: -// CHECK4-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias 
!32 +// CHECK4-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 // CHECK4-NEXT: store i32 1, ptr [[TMP11]], align 4 // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7]]) -// CHECK4-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 +// CHECK4-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 // CHECK4-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP12]]) // CHECK4-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK4: .untied.jmp.1.i: // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM2_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK4-NEXT: call void @__kmpc_critical(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2_I]], ptr @.gomp_critical_user_.var) -// CHECK4-NEXT: store i32 1, ptr @a, align 4, !noalias !32 +// CHECK4-NEXT: store i32 1, ptr @a, align 4, !noalias !33 // CHECK4-NEXT: call void @__kmpc_end_critical(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2_I]], ptr @.gomp_critical_user_.var) -// CHECK4-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK4-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK4-NEXT: br label [[CLEANUP_I]] // CHECK4: cleanup.i: -// CHECK4-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK4-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK4-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]] // CHECK4: .omp_outlined..3.exit: // CHECK4-NEXT: ret i32 0 @@ -5545,45 +5545,45 @@ // CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META33:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META36:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META38:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META40:![0-9]+]]) -// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !42 -// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 -// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !42 -// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !42 -// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !42 -// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !42 -// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !42 -// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias 
!42 -// CHECK4-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META34:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META37:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META39:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META41:![0-9]+]]) +// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !43 +// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 +// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !43 +// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !43 +// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !43 +// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !43 +// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !43 +// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 +// CHECK4-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK4-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK4-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK4-NEXT: ] // CHECK4: .untied.done..i: -// CHECK4-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK4-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK4-NEXT: br label [[CLEANUP_I:%.*]] // CHECK4: .untied.jmp..i: -// CHECK4-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 +// CHECK4-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 // CHECK4-NEXT: store i32 1, ptr [[TMP11]], align 4 // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9]]) -// CHECK4-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !42 +// CHECK4-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !43 // CHECK4-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP12]]) // CHECK4-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT:%.*]] // CHECK4: .untied.jmp.1.i: -// CHECK4-NEXT: store i32 1, ptr @a, align 4, !noalias !42 -// CHECK4-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK4-NEXT: store i32 1, ptr @a, align 4, !noalias !43 +// CHECK4-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK4-NEXT: br label [[CLEANUP_I]] // CHECK4: cleanup.i: -// CHECK4-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK4-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK4-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT]] // CHECK4: .omp_outlined..5.exit: // CHECK4-NEXT: ret i32 0 @@ -5603,45 +5603,45 @@ // CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: 
[[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_7:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META43:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META46:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META48:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META50:![0-9]+]]) -// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !52 -// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !52 -// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !52 -// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 -// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK4-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META44:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META47:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META49:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META51:![0-9]+]]) +// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !53 +// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !53 +// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !53 +// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 +// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK4-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK4-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK4-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK4-NEXT: ] // CHECK4: .untied.done..i: -// CHECK4-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK4-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK4-NEXT: br label [[CLEANUP_I:%.*]] // CHECK4: .untied.jmp..i: -// CHECK4-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 +// CHECK4-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 // CHECK4-NEXT: store i32 1, ptr [[TMP11]], align 4 // CHECK4-NEXT: 
[[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB11]]) -// CHECK4-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 +// CHECK4-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 // CHECK4-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP12]]) // CHECK4-NEXT: br label [[DOTOMP_OUTLINED__7_EXIT:%.*]] // CHECK4: .untied.jmp.1.i: -// CHECK4-NEXT: store i32 1, ptr @a, align 4, !noalias !52 -// CHECK4-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK4-NEXT: store i32 1, ptr @a, align 4, !noalias !53 +// CHECK4-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK4-NEXT: br label [[CLEANUP_I]] // CHECK4: cleanup.i: -// CHECK4-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK4-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK4-NEXT: br label [[DOTOMP_OUTLINED__7_EXIT]] // CHECK4: .omp_outlined..7.exit: // CHECK4-NEXT: ret i32 0 @@ -5660,24 +5660,24 @@ // CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_9:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META53:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META56:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META58:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META60:![0-9]+]]) -// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !62 -// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !62 -// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !62 -// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !62 -// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !62 -// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK4-NEXT: store i32 2, ptr @a, align 4, !noalias !62 +// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META54:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META57:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META59:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META61:![0-9]+]]) +// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias 
!63 +// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !63 +// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !63 +// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !63 +// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !63 +// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK4-NEXT: store i32 2, ptr @a, align 4, !noalias !63 // CHECK4-NEXT: ret i32 0 // // @@ -5694,24 +5694,24 @@ // CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_11:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META63:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META66:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META68:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META70:![0-9]+]]) -// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !72 -// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !72 -// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !72 -// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !72 -// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !72 -// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !72 -// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !72 -// CHECK4-NEXT: store i32 2, ptr @a, align 4, !noalias !72 +// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META64:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META67:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META69:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META71:![0-9]+]]) +// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !73 +// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !73 +// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !73 +// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !73 +// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !73 +// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !73 +// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !73 +// 
CHECK4-NEXT: store i32 2, ptr @a, align 4, !noalias !73 // CHECK4-NEXT: ret i32 0 // // @@ -5728,24 +5728,24 @@ // CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_13:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META73:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META76:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META78:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META80:![0-9]+]]) -// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !82 -// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !82 -// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !82 -// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !82 -// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !82 -// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !82 -// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !82 -// CHECK4-NEXT: store i32 3, ptr @a, align 4, !noalias !82 +// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META74:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META77:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META79:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META81:![0-9]+]]) +// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !83 +// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !83 +// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !83 +// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !83 +// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !83 +// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !83 +// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !83 +// CHECK4-NEXT: store i32 3, ptr @a, align 4, !noalias !83 // CHECK4-NEXT: ret i32 0 // // @@ -5762,24 +5762,24 @@ // CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // 
CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META83:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META86:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META88:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META90:![0-9]+]]) -// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !92 -// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !92 -// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !92 -// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !92 -// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !92 -// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !92 -// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !92 -// CHECK4-NEXT: store i32 4, ptr @a, align 4, !noalias !92 +// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META84:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META87:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META89:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META91:![0-9]+]]) +// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !93 +// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !93 +// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !93 +// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !93 +// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !93 +// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !93 +// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !93 +// CHECK4-NEXT: store i32 4, ptr @a, align 4, !noalias !93 // CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK4-NEXT: store i32 5, ptr [[TMP9]], align 128 // CHECK4-NEXT: ret i32 0 @@ -5813,30 +5813,30 @@ // CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_18:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK4-NEXT: 
[[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_18]], ptr [[TMP3]], i32 0, i32 2 -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META93:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META96:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META98:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META100:![0-9]+]]) -// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !102 -// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !102 -// CHECK4-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !102 -// CHECK4-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !102 -// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !102 -// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !102 -// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !102 -// CHECK4-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !102 -// CHECK4-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !102 +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META94:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META97:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META99:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META101:![0-9]+]]) +// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !103 +// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !103 +// CHECK4-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !103 +// CHECK4-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !103 +// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !103 +// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !103 +// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !103 +// CHECK4-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !103 +// CHECK4-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !103 // CHECK4-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK4-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !102 +// CHECK4-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !103 // CHECK4-NEXT: store i32 4, ptr [[TMP12]], align 128 -// CHECK4-NEXT: store i32 4, ptr @a, align 4, !noalias !102 +// CHECK4-NEXT: store i32 4, ptr @a, align 4, !noalias !103 // CHECK4-NEXT: ret i32 0 // // @@ -5894,32 +5894,32 @@ // CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = 
getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19]], ptr [[TMP3]], i32 0, i32 2 -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META103:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META106:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META108:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META110:![0-9]+]]) -// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 -// CHECK4-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !112 -// CHECK4-NEXT: store ptr @.omp_task_privates_map..20, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !112 -// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 -// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !112 -// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !112 -// CHECK4-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !112 -// CHECK4-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !112 +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META104:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META107:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META109:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META111:![0-9]+]]) +// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113 +// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 +// CHECK4-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !113 +// CHECK4-NEXT: store ptr @.omp_task_privates_map..20, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !113 +// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 +// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !113 +// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !113 +// CHECK4-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !113 +// CHECK4-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !113 // CHECK4-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTLOCAL_PTR_ADDR_I]], ptr [[DOTLOCAL_PTR_ADDR1_I]]) #[[ATTR4]] -// CHECK4-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !112 -// CHECK4-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR_I]], align 8, !noalias !112 -// CHECK4-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR1_I]], align 8, !noalias !112 -// CHECK4-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 -// CHECK4-NEXT: 
[[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4 +// CHECK4-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !113 +// CHECK4-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR_I]], align 8, !noalias !113 +// CHECK4-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR1_I]], align 8, !noalias !113 +// CHECK4-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 +// CHECK4-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: switch i32 [[TMP16]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK4-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK4-NEXT: i32 1, label [[DOTUNTIED_JMP_2_I:%.*]] @@ -5928,61 +5928,61 @@ // CHECK4-NEXT: i32 4, label [[DOTUNTIED_JMP_15_I:%.*]] // CHECK4-NEXT: ] // CHECK4: .untied.done..i: -// CHECK4-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK4-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !113 // CHECK4-NEXT: br label [[CLEANUP_I:%.*]] // CHECK4: .untied.jmp..i: -// CHECK4-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK4-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK4-NEXT: store i32 1, ptr [[TMP17]], align 4 // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) -// CHECK4-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK4-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK4-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP18]]) // CHECK4-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT:%.*]] // CHECK4: .untied.jmp.2.i: // CHECK4-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S1_I]]) // CHECK4-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S2_I]]) -// CHECK4-NEXT: store i32 0, ptr [[S2_I]], align 4, !noalias !112 +// CHECK4-NEXT: store i32 0, ptr [[S2_I]], align 4, !noalias !113 // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM3_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB23:[0-9]+]]) // CHECK4-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3_I]], i32 1, i64 256, i64 1, ptr @.omp_task_entry..19) // CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_18:%.*]], ptr [[TMP20]], i32 0, i32 2 -// CHECK4-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP12]], align 128 +// CHECK4-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP12]], align 128, !noundef [[NOUNDEF3]] // CHECK4-NEXT: store i32 [[TMP22]], ptr [[TMP21]], align 128 // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM4_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB23]]) // CHECK4-NEXT: [[TMP23:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM4_I]], ptr [[TMP20]]) -// CHECK4-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK4-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK4-NEXT: store i32 2, ptr [[TMP24]], align 4 // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM5_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) -// CHECK4-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK4-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK4-NEXT: [[TMP26:%.*]] = call i32 
@__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM5_I]], ptr [[TMP25]]) // CHECK4-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK4: .untied.jmp.6.i: // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM8_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK4-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_omp_taskyield(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM8_I]], i32 0) -// CHECK4-NEXT: [[TMP28:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK4-NEXT: [[TMP28:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK4-NEXT: store i32 3, ptr [[TMP28]], align 4 // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM9_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) -// CHECK4-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK4-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK4-NEXT: [[TMP30:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM9_I]], ptr [[TMP29]]) // CHECK4-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK4: .untied.jmp.10.i: // CHECK4-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[REF_TMP_I]]) -// CHECK4-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[S1_I]], ptr align 4 [[REF_TMP_I]], i64 4, i1 false), !noalias !112 +// CHECK4-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[S1_I]], ptr align 4 [[REF_TMP_I]], i64 4, i1 false), !noalias !113 // CHECK4-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[REF_TMP_I]]) #[[ATTR4]] -// CHECK4-NEXT: store i32 10, ptr [[S2_I]], align 4, !noalias !112 +// CHECK4-NEXT: store i32 10, ptr [[S2_I]], align 4, !noalias !113 // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM13_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK4-NEXT: [[TMP31:%.*]] = call i32 @__kmpc_omp_taskwait(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM13_I]]) -// CHECK4-NEXT: [[TMP32:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK4-NEXT: [[TMP32:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK4-NEXT: store i32 4, ptr [[TMP32]], align 4 // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM14_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) -// CHECK4-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK4-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK4-NEXT: [[TMP34:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM14_I]], ptr [[TMP33]]) // CHECK4-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK4: .untied.jmp.15.i: // CHECK4-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S2_I]]) #[[ATTR4]] // CHECK4-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S1_I]]) #[[ATTR4]] -// CHECK4-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK4-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !113 // CHECK4-NEXT: br label [[CLEANUP_I]] // CHECK4: cleanup.i: -// CHECK4-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK4-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !113 // CHECK4-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK4: .omp_outlined..17.exit: // CHECK4-NEXT: ret i32 0 @@ -6048,7 +6048,7 @@ // CHECK4-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 
[[OMP_GLOBAL_THREAD_NUM]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..23) // CHECK4-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_22:%.*]], ptr [[TMP1]], i32 0, i32 0 // CHECK4-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP2]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 8 +// CHECK4-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP4]], ptr align 8 [[AGG_CAPTURED]], i64 8, i1 false) // CHECK4-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB25]]) // CHECK4-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2]], ptr [[TMP1]]) @@ -6068,24 +6068,24 @@ // CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_22:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META113:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META116:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META118:![0-9]+]]) -// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META120:![0-9]+]]) -// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 -// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 -// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !122 -// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !122 -// CHECK4-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 -// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !122 -// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !122 -// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META114:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META117:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META119:![0-9]+]]) +// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META121:![0-9]+]]) +// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123 +// CHECK4-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 +// CHECK4-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !123 +// CHECK4-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !123 +// CHECK4-NEXT: store ptr [[TMP3]], ptr 
[[DOTTASK_T__ADDR_I]], align 8, !noalias !123 +// CHECK4-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !123 +// CHECK4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !123 +// CHECK4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK4-NEXT: store i32 0, ptr [[TMP9]], align 4 // CHECK4-NEXT: ret i32 0 // @@ -6139,7 +6139,7 @@ // CHECK3-51-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq ptr [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] // CHECK3-51-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] // CHECK3-51: arrayctor.cont: -// CHECK3-51-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4 +// CHECK3-51-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK3-51-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK3-51-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-51-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 @@ -6150,13 +6150,13 @@ // CHECK3-51-NEXT: store ptr [[B]], ptr [[TMP4]], align 8 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 // CHECK3-51-NEXT: store ptr [[S]], ptr [[TMP5]], align 8 -// CHECK3-51-NEXT: [[TMP6:%.*]] = load i8, ptr [[B]], align 1 +// CHECK3-51-NEXT: [[TMP6:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[CONV:%.*]] = sext i8 [[TMP6]] to i32 // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]]) // CHECK3-51-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 33, i64 40, i64 16, ptr @.omp_task_entry.) // CHECK3-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP7]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP8]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 +// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP10]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK3-51-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP8]], i32 0, i32 4 // CHECK3-51-NEXT: store i32 [[CONV]], ptr [[TMP11]], align 8 @@ -6168,7 +6168,7 @@ // CHECK3-51-NEXT: [[TMP14:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..2) // CHECK3-51-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP14]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP15]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 +// CHECK3-51-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP17]], ptr align 8 [[AGG_CAPTURED2]], i64 8, i1 false) // CHECK3-51-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR]], i64 0, i64 0 // CHECK3-51-NEXT: [[TMP19:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], ptr [[TMP18]], i64 0 @@ -6233,12 +6233,12 @@ // CHECK3-51-NEXT: store i64 4, ptr [[TMP55]], align 8 // CHECK3-51-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP53]], i32 0, i32 2 // CHECK3-51-NEXT: store i8 
3, ptr [[TMP56]], align 8 -// CHECK3-51-NEXT: [[TMP57:%.*]] = load i8, ptr [[B]], align 1 +// CHECK3-51-NEXT: [[TMP57:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP58:%.*]] = sext i8 [[TMP57]] to i64 // CHECK3-51-NEXT: [[TMP59:%.*]] = mul nsw i64 4, [[TMP1]] // CHECK3-51-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP59]] // CHECK3-51-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX13]], i64 [[TMP58]] -// CHECK3-51-NEXT: [[TMP60:%.*]] = load i8, ptr [[B]], align 1 +// CHECK3-51-NEXT: [[TMP60:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP61:%.*]] = sext i8 [[TMP60]] to i64 // CHECK3-51-NEXT: [[TMP62:%.*]] = mul nsw i64 9, [[TMP1]] // CHECK3-51-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP62]] @@ -6273,12 +6273,12 @@ // CHECK3-51-NEXT: store i64 4, ptr [[TMP80]], align 8 // CHECK3-51-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP78]], i32 0, i32 2 // CHECK3-51-NEXT: store i8 4, ptr [[TMP81]], align 8 -// CHECK3-51-NEXT: [[TMP82:%.*]] = load i8, ptr [[B]], align 1 +// CHECK3-51-NEXT: [[TMP82:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP83:%.*]] = sext i8 [[TMP82]] to i64 // CHECK3-51-NEXT: [[TMP84:%.*]] = mul nsw i64 4, [[TMP1]] // CHECK3-51-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP84]] // CHECK3-51-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX23]], i64 [[TMP83]] -// CHECK3-51-NEXT: [[TMP85:%.*]] = load i8, ptr [[B]], align 1 +// CHECK3-51-NEXT: [[TMP85:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP86:%.*]] = sext i8 [[TMP85]] to i64 // CHECK3-51-NEXT: [[TMP87:%.*]] = mul nsw i64 9, [[TMP1]] // CHECK3-51-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP87]] @@ -6323,10 +6323,10 @@ // CHECK3-51-NEXT: [[TMP111:%.*]] = mul nsw i64 0, [[TMP1]] // CHECK3-51-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP111]] // CHECK3-51-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX33]], i64 3 -// CHECK3-51-NEXT: [[TMP112:%.*]] = load i32, ptr @a, align 4 +// CHECK3-51-NEXT: [[TMP112:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP113:%.*]] = sext i32 [[TMP112]] to i64 // CHECK3-51-NEXT: [[LEN_SUB_1:%.*]] = sub nsw i64 [[TMP113]], 1 -// CHECK3-51-NEXT: [[TMP114:%.*]] = load i32, ptr @a, align 4 +// CHECK3-51-NEXT: [[TMP114:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP115:%.*]] = sext i32 [[TMP114]] to i64 // CHECK3-51-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP115]] // CHECK3-51-NEXT: [[TMP116:%.*]] = mul nsw i64 [[LB_ADD_LEN]], [[TMP1]] @@ -6370,10 +6370,10 @@ // CHECK3-51-NEXT: [[TMP139:%.*]] = mul nsw i64 0, [[TMP1]] // CHECK3-51-NEXT: [[ARRAYIDX43:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP139]] // CHECK3-51-NEXT: [[ARRAYIDX44:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX43]], i64 3 -// CHECK3-51-NEXT: [[TMP140:%.*]] = load i32, ptr @a, align 4 +// CHECK3-51-NEXT: [[TMP140:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP141:%.*]] = sext i32 [[TMP140]] to i64 // CHECK3-51-NEXT: [[LEN_SUB_145:%.*]] = sub nsw i64 [[TMP141]], 1 -// CHECK3-51-NEXT: [[TMP142:%.*]] = load i32, ptr @a, align 4 +// CHECK3-51-NEXT: [[TMP142:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // 
CHECK3-51-NEXT: [[TMP143:%.*]] = sext i32 [[TMP142]] to i64 // CHECK3-51-NEXT: [[LB_ADD_LEN46:%.*]] = add nsw i64 -1, [[TMP143]] // CHECK3-51-NEXT: [[TMP144:%.*]] = mul nsw i64 [[LB_ADD_LEN46]], [[TMP1]] @@ -6407,7 +6407,7 @@ // CHECK3-51-NEXT: [[TMP160:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM56]], ptr [[TMP158]]) // CHECK3-51-NEXT: [[TMP161:%.*]] = getelementptr inbounds [[STRUCT_ANON_16]], ptr [[AGG_CAPTURED57]], i32 0, i32 0 // CHECK3-51-NEXT: store ptr [[C]], ptr [[TMP161]], align 8 -// CHECK3-51-NEXT: [[TMP162:%.*]] = load i8, ptr [[B]], align 1 +// CHECK3-51-NEXT: [[TMP162:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP162]], 0 // CHECK3-51-NEXT: [[TMP163:%.*]] = select i1 [[TOBOOL]], i32 2, i32 0 // CHECK3-51-NEXT: [[TMP164:%.*]] = or i32 [[TMP163]], 1 @@ -6415,7 +6415,7 @@ // CHECK3-51-NEXT: [[TMP165:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM58]], i32 [[TMP164]], i64 40, i64 8, ptr @.omp_task_entry..18) // CHECK3-51-NEXT: [[TMP166:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_17:%.*]], ptr [[TMP165]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP167:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP166]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP168:%.*]] = load ptr, ptr [[TMP167]], align 8 +// CHECK3-51-NEXT: [[TMP168:%.*]] = load ptr, ptr [[TMP167]], align 8, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP168]], ptr align 8 [[AGG_CAPTURED57]], i64 8, i1 false) // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM59:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) // CHECK3-51-NEXT: [[TMP169:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM59]], ptr [[TMP165]]) @@ -6424,13 +6424,13 @@ // CHECK3-51-NEXT: [[TMP171:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_21:%.*]], ptr [[TMP170]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP172:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_21]], ptr [[TMP170]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP173:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_22:%.*]], ptr [[TMP172]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP174:%.*]] = load i32, ptr [[C]], align 128 +// CHECK3-51-NEXT: [[TMP174:%.*]] = load i32, ptr [[C]], align 128, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: store i32 [[TMP174]], ptr [[TMP173]], align 128 // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM62:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB23]]) // CHECK3-51-NEXT: [[TMP175:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP171]], i32 0, i32 2 // CHECK3-51-NEXT: store i32 0, ptr [[TMP175]], align 16 // CHECK3-51-NEXT: [[TMP176:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM62]], ptr [[TMP170]]) -// CHECK3-51-NEXT: [[TMP177:%.*]] = load i32, ptr @a, align 4 +// CHECK3-51-NEXT: [[TMP177:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: store i32 [[TMP177]], ptr [[RETVAL]], align 4 // CHECK3-51-NEXT: [[TMP178:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK3-51-NEXT: call void @llvm.stackrestore(ptr [[TMP178]]) @@ -6471,25 +6471,25 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr 
[[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK3-51-NEXT: store i32 15, ptr @a, align 4, !noalias !12 -// CHECK3-51-NEXT: [[TMP9:%.*]] = load i32, ptr @a, align 4, !noalias !12 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK3-51-NEXT: store i32 15, ptr @a, align 4, !noalias !13 +// CHECK3-51-NEXT: [[TMP9:%.*]] = load i32, ptr @a, align 4, !noalias !13, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[CONV_I:%.*]] = trunc i32 [[TMP9]] to i8 // CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK3-51-NEXT: store i8 [[CONV_I]], ptr [[TMP10]], align 1 @@ -6512,24 +6512,24 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr 
[[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !22 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !22 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !22 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !22 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !22 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 -// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 -// CHECK3-51-NEXT: store i32 15, ptr @a, align 4, !noalias !22 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META14:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !23 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !23 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !23 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !23 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !23 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !23 +// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !23 +// CHECK3-51-NEXT: store i32 15, ptr @a, align 4, !noalias !23 // CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK3-51-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[TMP9]], i64 0, i64 1 // CHECK3-51-NEXT: store i32 10, ptr [[ARRAYIDX_I]], align 4 @@ -6550,48 +6550,48 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META28:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !32 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !32 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK3-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META27:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META29:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META31:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !33 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !33 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !33 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK3-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK3-51-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK3-51-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK3-51-NEXT: ] // CHECK3-51: .untied.done..i: -// CHECK3-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK3-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK3-51-NEXT: br label [[CLEANUP_I:%.*]] // CHECK3-51: .untied.jmp..i: -// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 +// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 // CHECK3-51-NEXT: store i32 1, ptr [[TMP11]], align 4 // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7]]) -// CHECK3-51-NEXT: 
[[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 +// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 // CHECK3-51-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP12]]) // CHECK3-51-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK3-51: .untied.jmp.1.i: // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM2_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK3-51-NEXT: call void @__kmpc_critical(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2_I]], ptr @.gomp_critical_user_.var) -// CHECK3-51-NEXT: store i32 1, ptr @a, align 4, !noalias !32 +// CHECK3-51-NEXT: store i32 1, ptr @a, align 4, !noalias !33 // CHECK3-51-NEXT: call void @__kmpc_end_critical(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2_I]], ptr @.gomp_critical_user_.var) -// CHECK3-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK3-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK3-51-NEXT: br label [[CLEANUP_I]] // CHECK3-51: cleanup.i: -// CHECK3-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK3-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK3-51-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]] // CHECK3-51: .omp_outlined..3.exit: // CHECK3-51-NEXT: ret i32 0 @@ -6611,45 +6611,45 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META33:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META36:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META38:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META40:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !42 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !42 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !42 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !42 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !42 -// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !42 -// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 -// CHECK3-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef 
[[NOUNDEF3]] +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META34:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META37:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META39:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META41:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !43 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !43 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !43 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !43 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !43 +// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !43 +// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 +// CHECK3-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK3-51-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK3-51-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK3-51-NEXT: ] // CHECK3-51: .untied.done..i: -// CHECK3-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK3-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK3-51-NEXT: br label [[CLEANUP_I:%.*]] // CHECK3-51: .untied.jmp..i: -// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 +// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 // CHECK3-51-NEXT: store i32 1, ptr [[TMP11]], align 4 // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9]]) -// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !42 +// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !43 // CHECK3-51-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP12]]) // CHECK3-51-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT:%.*]] // CHECK3-51: .untied.jmp.1.i: -// CHECK3-51-NEXT: store i32 1, ptr @a, align 4, !noalias !42 -// CHECK3-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK3-51-NEXT: store i32 1, ptr @a, align 4, !noalias !43 +// CHECK3-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK3-51-NEXT: br label [[CLEANUP_I]] // CHECK3-51: cleanup.i: -// CHECK3-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK3-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK3-51-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT]] // CHECK3-51: .omp_outlined..5.exit: // CHECK3-51-NEXT: ret i32 0 @@ -6669,45 +6669,45 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: 
[[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_7:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META43:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META46:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META48:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META50:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !52 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !52 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !52 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK3-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META44:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META47:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META49:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META51:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !53 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !53 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !53 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK3-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK3-51-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK3-51-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK3-51-NEXT: ] // CHECK3-51: .untied.done..i: -// CHECK3-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK3-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK3-51-NEXT: br label [[CLEANUP_I:%.*]] // CHECK3-51: .untied.jmp..i: -// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 +// CHECK3-51-NEXT: [[TMP11:%.*]] = load 
ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 // CHECK3-51-NEXT: store i32 1, ptr [[TMP11]], align 4 // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB11]]) -// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 +// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 // CHECK3-51-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP12]]) // CHECK3-51-NEXT: br label [[DOTOMP_OUTLINED__7_EXIT:%.*]] // CHECK3-51: .untied.jmp.1.i: -// CHECK3-51-NEXT: store i32 1, ptr @a, align 4, !noalias !52 -// CHECK3-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK3-51-NEXT: store i32 1, ptr @a, align 4, !noalias !53 +// CHECK3-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK3-51-NEXT: br label [[CLEANUP_I]] // CHECK3-51: cleanup.i: -// CHECK3-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK3-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK3-51-NEXT: br label [[DOTOMP_OUTLINED__7_EXIT]] // CHECK3-51: .omp_outlined..7.exit: // CHECK3-51-NEXT: ret i32 0 @@ -6726,24 +6726,24 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_9:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META53:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META56:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META58:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META60:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !62 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !62 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !62 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !62 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !62 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK3-51-NEXT: store i32 2, ptr @a, align 4, !noalias !62 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META54:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META57:![0-9]+]]) +// CHECK3-51-NEXT: 
call void @llvm.experimental.noalias.scope.decl(metadata [[META59:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META61:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !63 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !63 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !63 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !63 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !63 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK3-51-NEXT: store i32 2, ptr @a, align 4, !noalias !63 // CHECK3-51-NEXT: ret i32 0 // // @@ -6760,24 +6760,24 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_11:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META63:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META66:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META68:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META70:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !72 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !72 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !72 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !72 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !72 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !72 -// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !72 -// CHECK3-51-NEXT: store i32 2, ptr @a, align 4, !noalias !72 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META64:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META67:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META69:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META71:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !73 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !73 +// CHECK3-51-NEXT: store ptr null, ptr 
[[DOTPRIVATES__ADDR_I]], align 8, !noalias !73 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !73 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !73 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !73 +// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !73 +// CHECK3-51-NEXT: store i32 2, ptr @a, align 4, !noalias !73 // CHECK3-51-NEXT: ret i32 0 // // @@ -6794,24 +6794,24 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_13:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META73:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META76:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META78:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META80:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !82 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !82 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !82 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !82 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !82 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !82 -// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !82 -// CHECK3-51-NEXT: store i32 2, ptr @a, align 4, !noalias !82 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META74:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META77:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META79:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META81:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !83 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !83 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !83 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !83 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !83 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !83 +// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !83 +// 
CHECK3-51-NEXT: store i32 2, ptr @a, align 4, !noalias !83 // CHECK3-51-NEXT: ret i32 0 // // @@ -6828,24 +6828,24 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META83:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META86:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META88:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META90:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !92 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !92 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !92 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !92 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !92 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !92 -// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !92 -// CHECK3-51-NEXT: store i32 3, ptr @a, align 4, !noalias !92 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META84:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META87:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META89:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META91:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !93 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !93 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !93 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !93 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !93 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !93 +// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !93 +// CHECK3-51-NEXT: store i32 3, ptr @a, align 4, !noalias !93 // CHECK3-51-NEXT: ret i32 0 // // @@ -6862,24 +6862,24 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load 
i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_17:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META93:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META96:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META98:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META100:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !102 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !102 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !102 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !102 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !102 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !102 -// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !102 -// CHECK3-51-NEXT: store i32 4, ptr @a, align 4, !noalias !102 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META94:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META97:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META99:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META101:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !103 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !103 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !103 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !103 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !103 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !103 +// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !103 +// CHECK3-51-NEXT: store i32 4, ptr @a, align 4, !noalias !103 // CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK3-51-NEXT: store i32 5, ptr [[TMP9]], align 128 // CHECK3-51-NEXT: ret i32 0 @@ -6913,30 +6913,30 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_20:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = 
getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_20]], ptr [[TMP3]], i32 0, i32 2 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META103:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META106:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META108:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META110:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 -// CHECK3-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !112 -// CHECK3-51-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !112 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !112 -// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !112 -// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !112 -// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !112 +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META104:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META107:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META109:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META111:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 +// CHECK3-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !113 +// CHECK3-51-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !113 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !113 +// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !113 +// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !113 +// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !113 // CHECK3-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !112 +// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !113 // CHECK3-51-NEXT: store i32 4, ptr [[TMP12]], align 128 -// CHECK3-51-NEXT: store i32 4, ptr @a, align 4, !noalias !112 +// CHECK3-51-NEXT: store i32 4, ptr @a, align 4, !noalias !113 // CHECK3-51-NEXT: ret i32 0 // // @@ -6994,32 +6994,32 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, 
align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_21:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_21]], ptr [[TMP3]], i32 0, i32 2 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META113:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META116:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META118:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META120:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 -// CHECK3-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !122 -// CHECK3-51-NEXT: store ptr @.omp_task_privates_map..22, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !122 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !122 -// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !122 -// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !122 -// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !122 +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META114:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META117:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META119:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META121:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 +// CHECK3-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !123 +// CHECK3-51-NEXT: store ptr @.omp_task_privates_map..22, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !123 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !123 +// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !123 +// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !123 +// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !123 // CHECK3-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr 
[[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTLOCAL_PTR_ADDR_I]], ptr [[DOTLOCAL_PTR_ADDR1_I]]) #[[ATTR4]] -// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !122 -// CHECK3-51-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR_I]], align 8, !noalias !122 -// CHECK3-51-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR1_I]], align 8, !noalias !122 -// CHECK3-51-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 -// CHECK3-51-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4 +// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !123 +// CHECK3-51-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR_I]], align 8, !noalias !123 +// CHECK3-51-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR1_I]], align 8, !noalias !123 +// CHECK3-51-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 +// CHECK3-51-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: switch i32 [[TMP16]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK3-51-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK3-51-NEXT: i32 1, label [[DOTUNTIED_JMP_2_I:%.*]] @@ -7028,61 +7028,61 @@ // CHECK3-51-NEXT: i32 4, label [[DOTUNTIED_JMP_15_I:%.*]] // CHECK3-51-NEXT: ] // CHECK3-51: .untied.done..i: -// CHECK3-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !122 +// CHECK3-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !123 // CHECK3-51-NEXT: br label [[CLEANUP_I:%.*]] // CHECK3-51: .untied.jmp..i: -// CHECK3-51-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 +// CHECK3-51-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 // CHECK3-51-NEXT: store i32 1, ptr [[TMP17]], align 4 // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB23]]) -// CHECK3-51-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 +// CHECK3-51-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 // CHECK3-51-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP18]]) // CHECK3-51-NEXT: br label [[DOTOMP_OUTLINED__19_EXIT:%.*]] // CHECK3-51: .untied.jmp.2.i: // CHECK3-51-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S1_I]]) // CHECK3-51-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S2_I]]) -// CHECK3-51-NEXT: store i32 0, ptr [[S2_I]], align 4, !noalias !122 +// CHECK3-51-NEXT: store i32 0, ptr [[S2_I]], align 4, !noalias !123 // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM3_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB25:[0-9]+]]) // CHECK3-51-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3_I]], i32 1, i64 256, i64 1, ptr @.omp_task_entry..21) // CHECK3-51-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_20:%.*]], ptr [[TMP20]], i32 0, i32 2 -// CHECK3-51-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP12]], align 128 +// CHECK3-51-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP12]], align 128, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: store i32 [[TMP22]], ptr [[TMP21]], align 128 // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM4_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB25]]) // CHECK3-51-NEXT: [[TMP23:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 
[[OMP_GLOBAL_THREAD_NUM4_I]], ptr [[TMP20]]) -// CHECK3-51-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 +// CHECK3-51-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 // CHECK3-51-NEXT: store i32 2, ptr [[TMP24]], align 4 // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM5_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB23]]) -// CHECK3-51-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 +// CHECK3-51-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 // CHECK3-51-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM5_I]], ptr [[TMP25]]) // CHECK3-51-NEXT: br label [[DOTOMP_OUTLINED__19_EXIT]] // CHECK3-51: .untied.jmp.6.i: // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM8_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK3-51-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_omp_taskyield(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM8_I]], i32 0) -// CHECK3-51-NEXT: [[TMP28:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 +// CHECK3-51-NEXT: [[TMP28:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 // CHECK3-51-NEXT: store i32 3, ptr [[TMP28]], align 4 // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM9_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB23]]) -// CHECK3-51-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 +// CHECK3-51-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 // CHECK3-51-NEXT: [[TMP30:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM9_I]], ptr [[TMP29]]) // CHECK3-51-NEXT: br label [[DOTOMP_OUTLINED__19_EXIT]] // CHECK3-51: .untied.jmp.10.i: // CHECK3-51-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[REF_TMP_I]]) -// CHECK3-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[S1_I]], ptr align 4 [[REF_TMP_I]], i64 4, i1 false), !noalias !122 +// CHECK3-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[S1_I]], ptr align 4 [[REF_TMP_I]], i64 4, i1 false), !noalias !123 // CHECK3-51-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[REF_TMP_I]]) #[[ATTR4]] -// CHECK3-51-NEXT: store i32 10, ptr [[S2_I]], align 4, !noalias !122 +// CHECK3-51-NEXT: store i32 10, ptr [[S2_I]], align 4, !noalias !123 // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM13_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK3-51-NEXT: [[TMP31:%.*]] = call i32 @__kmpc_omp_taskwait(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM13_I]]) -// CHECK3-51-NEXT: [[TMP32:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 +// CHECK3-51-NEXT: [[TMP32:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 // CHECK3-51-NEXT: store i32 4, ptr [[TMP32]], align 4 // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM14_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB23]]) -// CHECK3-51-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 +// CHECK3-51-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 // CHECK3-51-NEXT: [[TMP34:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM14_I]], ptr [[TMP33]]) // CHECK3-51-NEXT: br label [[DOTOMP_OUTLINED__19_EXIT]] // CHECK3-51: .untied.jmp.15.i: // CHECK3-51-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S2_I]]) #[[ATTR4]] // CHECK3-51-NEXT: call void 
@_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S1_I]]) #[[ATTR4]] -// CHECK3-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !122 +// CHECK3-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !123 // CHECK3-51-NEXT: br label [[CLEANUP_I]] // CHECK3-51: cleanup.i: -// CHECK3-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !122 +// CHECK3-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !123 // CHECK3-51-NEXT: br label [[DOTOMP_OUTLINED__19_EXIT]] // CHECK3-51: .omp_outlined..19.exit: // CHECK3-51-NEXT: ret i32 0 @@ -7148,7 +7148,7 @@ // CHECK3-51-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..25) // CHECK3-51-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_24:%.*]], ptr [[TMP1]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 8 +// CHECK3-51-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP4]], ptr align 8 [[AGG_CAPTURED]], i64 8, i1 false) // CHECK3-51-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB27]]) // CHECK3-51-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2]], ptr [[TMP1]]) @@ -7168,24 +7168,24 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_24:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META123:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META126:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META128:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META130:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !132 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !132 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !132 -// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !132 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !132 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !132 -// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !132 -// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK3-51-NEXT: 
[[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META124:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META127:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META129:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META131:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !133 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !133 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !133 +// CHECK3-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !133 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !133 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !133 +// CHECK3-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !133 +// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: store i32 0, ptr [[TMP9]], align 4 // CHECK3-51-NEXT: ret i32 0 // @@ -7218,7 +7218,7 @@ // CHECK3-51-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_26:%.*]], ptr [[TMP0]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_26]], ptr [[TMP0]], i32 0, i32 1 // CHECK3-51-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_27:%.*]], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-51-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: store i32 [[TMP4]], ptr [[TMP3]], align 8 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR]], i64 0, i64 0 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], ptr [[TMP5]], i64 0 @@ -7244,7 +7244,7 @@ // CHECK3-51-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_29:%.*]], ptr [[TMP16]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_29]], ptr [[TMP16]], i32 0, i32 1 // CHECK3-51-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_30:%.*]], ptr [[TMP18]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP20:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-51-NEXT: [[TMP20:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: store i32 [[TMP20]], ptr [[TMP19]], align 8 // CHECK3-51-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR4]], i64 0, i64 0 // CHECK3-51-NEXT: [[TMP22:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP21]], i64 0 @@ -7270,7 +7270,7 @@ // CHECK3-51-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_32:%.*]], ptr [[TMP32]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_32]], ptr [[TMP32]], i32 0, i32 1 // CHECK3-51-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_33:%.*]], ptr [[TMP34]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP36:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-51-NEXT: [[TMP36:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: store i32 [[TMP36]], ptr [[TMP35]], 
align 8 // CHECK3-51-NEXT: [[TMP37:%.*]] = getelementptr inbounds [2 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR9]], i64 0, i64 0 // CHECK3-51-NEXT: [[TMP38:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP37]], i64 0 @@ -7296,7 +7296,7 @@ // CHECK3-51-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_35:%.*]], ptr [[TMP48]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_35]], ptr [[TMP48]], i32 0, i32 1 // CHECK3-51-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_36:%.*]], ptr [[TMP50]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP52:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-51-NEXT: [[TMP52:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: store i32 [[TMP52]], ptr [[TMP51]], align 8 // CHECK3-51-NEXT: [[TMP53:%.*]] = getelementptr inbounds [2 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR14]], i64 0, i64 0 // CHECK3-51-NEXT: [[TMP54:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP53]], i64 0 @@ -7322,7 +7322,7 @@ // CHECK3-51-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_38:%.*]], ptr [[TMP64]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_38]], ptr [[TMP64]], i32 0, i32 1 // CHECK3-51-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_39:%.*]], ptr [[TMP66]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP68:%.*]] = load i32, ptr [[A]], align 4 +// CHECK3-51-NEXT: [[TMP68:%.*]] = load i32, ptr [[A]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: store i32 [[TMP68]], ptr [[TMP67]], align 8 // CHECK3-51-NEXT: [[TMP69:%.*]] = getelementptr inbounds [1 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR19]], i64 0, i64 0 // CHECK3-51-NEXT: [[TMP70:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP69]], i64 0 @@ -7366,28 +7366,28 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_26:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_26]], ptr [[TMP3]], i32 0, i32 1 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META133:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META136:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META138:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META140:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !142 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, 
!noalias !142 -// CHECK3-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !142 -// CHECK3-51-NEXT: store ptr @.omp_task_privates_map..27, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !142 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !142 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !142 -// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !142 -// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !142 -// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !142 +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META134:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META137:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META139:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META141:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !143 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !143 +// CHECK3-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !143 +// CHECK3-51-NEXT: store ptr @.omp_task_privates_map..27, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !143 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !143 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !143 +// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !143 +// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !143 +// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !143 // CHECK3-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !142 +// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !143 // CHECK3-51-NEXT: store i32 13, ptr [[TMP12]], align 4 // CHECK3-51-NEXT: ret i32 0 // @@ -7420,28 +7420,28 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_29:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_29]], ptr [[TMP3]], i32 0, i32 1 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META143:![0-9]+]]) -// CHECK3-51-NEXT: call void 
@llvm.experimental.noalias.scope.decl(metadata [[META146:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META148:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META150:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !152 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !152 -// CHECK3-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !152 -// CHECK3-51-NEXT: store ptr @.omp_task_privates_map..30, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !152 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !152 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !152 -// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !152 -// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !152 -// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !152 +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META144:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META147:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META149:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META151:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !153 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !153 +// CHECK3-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !153 +// CHECK3-51-NEXT: store ptr @.omp_task_privates_map..30, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !153 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !153 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !153 +// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !153 +// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !153 +// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !153 // CHECK3-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !152 +// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !153 // CHECK3-51-NEXT: store i32 14, ptr [[TMP12]], align 4 // CHECK3-51-NEXT: ret i32 0 // @@ -7474,28 +7474,28 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_32:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// 
CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_32]], ptr [[TMP3]], i32 0, i32 1 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META153:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META156:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META158:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META160:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !162 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !162 -// CHECK3-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !162 -// CHECK3-51-NEXT: store ptr @.omp_task_privates_map..33, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !162 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !162 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !162 -// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !162 -// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !162 -// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !162 +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META154:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META157:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META159:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META161:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !163 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !163 +// CHECK3-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !163 +// CHECK3-51-NEXT: store ptr @.omp_task_privates_map..33, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !163 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !163 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !163 +// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !163 +// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !163 +// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !163 // CHECK3-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !162 +// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !163 // CHECK3-51-NEXT: store i32 15, ptr [[TMP12]], align 4 // CHECK3-51-NEXT: ret i32 0 // @@ -7528,28 +7528,28 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: 
[[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_35:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_35]], ptr [[TMP3]], i32 0, i32 1 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META163:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META166:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META168:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META170:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !172 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !172 -// CHECK3-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !172 -// CHECK3-51-NEXT: store ptr @.omp_task_privates_map..36, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !172 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !172 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !172 -// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !172 -// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !172 -// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !172 +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META164:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META167:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META169:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META171:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !173 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !173 +// CHECK3-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !173 +// CHECK3-51-NEXT: store ptr @.omp_task_privates_map..36, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !173 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !173 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !173 +// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !173 +// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !173 +// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !173 // CHECK3-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !172 +// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !173 // CHECK3-51-NEXT: store i32 16, ptr [[TMP12]], align 4 // CHECK3-51-NEXT: ret 
i32 0 // @@ -7582,28 +7582,28 @@ // CHECK3-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_38:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK3-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK3-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_38]], ptr [[TMP3]], i32 0, i32 1 -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META173:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META176:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META178:![0-9]+]]) -// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META180:![0-9]+]]) -// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !182 -// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !182 -// CHECK3-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !182 -// CHECK3-51-NEXT: store ptr @.omp_task_privates_map..39, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !182 -// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !182 -// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !182 -// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !182 -// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !182 -// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !182 +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META174:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META177:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META179:![0-9]+]]) +// CHECK3-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META181:![0-9]+]]) +// CHECK3-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !183 +// CHECK3-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !183 +// CHECK3-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !183 +// CHECK3-51-NEXT: store ptr @.omp_task_privates_map..39, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !183 +// CHECK3-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !183 +// CHECK3-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !183 +// CHECK3-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !183 +// CHECK3-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !183 +// CHECK3-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], 
align 8, !noalias !183 // CHECK3-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !182 +// CHECK3-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !183 // CHECK3-51-NEXT: store i32 17, ptr [[TMP12]], align 4 // CHECK3-51-NEXT: ret i32 0 // @@ -7654,7 +7654,7 @@ // CHECK4-51-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq ptr [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] // CHECK4-51-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] // CHECK4-51: arrayctor.cont: -// CHECK4-51-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4 +// CHECK4-51-NEXT: [[TMP0:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK4-51-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK4-51-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK4-51-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 @@ -7665,13 +7665,13 @@ // CHECK4-51-NEXT: store ptr [[B]], ptr [[TMP4]], align 8 // CHECK4-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 // CHECK4-51-NEXT: store ptr [[S]], ptr [[TMP5]], align 8 -// CHECK4-51-NEXT: [[TMP6:%.*]] = load i8, ptr [[B]], align 1 +// CHECK4-51-NEXT: [[TMP6:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[CONV:%.*]] = sext i8 [[TMP6]] to i32 // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]]) // CHECK4-51-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 33, i64 40, i64 16, ptr @.omp_task_entry.) // CHECK4-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP7]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP8]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 +// CHECK4-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP10]], ptr align 8 [[AGG_CAPTURED]], i64 16, i1 false) // CHECK4-51-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP8]], i32 0, i32 4 // CHECK4-51-NEXT: store i32 [[CONV]], ptr [[TMP11]], align 8 @@ -7683,7 +7683,7 @@ // CHECK4-51-NEXT: [[TMP14:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..2) // CHECK4-51-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP14]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP15]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 +// CHECK4-51-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP17]], ptr align 8 [[AGG_CAPTURED2]], i64 8, i1 false) // CHECK4-51-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR]], i64 0, i64 0 // CHECK4-51-NEXT: [[TMP19:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], ptr [[TMP18]], i64 0 @@ -7748,12 +7748,12 @@ // CHECK4-51-NEXT: store i64 4, ptr [[TMP55]], align 8 // CHECK4-51-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP53]], i32 0, i32 2 // 
CHECK4-51-NEXT: store i8 3, ptr [[TMP56]], align 8 -// CHECK4-51-NEXT: [[TMP57:%.*]] = load i8, ptr [[B]], align 1 +// CHECK4-51-NEXT: [[TMP57:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP58:%.*]] = sext i8 [[TMP57]] to i64 // CHECK4-51-NEXT: [[TMP59:%.*]] = mul nsw i64 4, [[TMP1]] // CHECK4-51-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP59]] // CHECK4-51-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX13]], i64 [[TMP58]] -// CHECK4-51-NEXT: [[TMP60:%.*]] = load i8, ptr [[B]], align 1 +// CHECK4-51-NEXT: [[TMP60:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP61:%.*]] = sext i8 [[TMP60]] to i64 // CHECK4-51-NEXT: [[TMP62:%.*]] = mul nsw i64 9, [[TMP1]] // CHECK4-51-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP62]] @@ -7788,12 +7788,12 @@ // CHECK4-51-NEXT: store i64 4, ptr [[TMP80]], align 8 // CHECK4-51-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP78]], i32 0, i32 2 // CHECK4-51-NEXT: store i8 4, ptr [[TMP81]], align 8 -// CHECK4-51-NEXT: [[TMP82:%.*]] = load i8, ptr [[B]], align 1 +// CHECK4-51-NEXT: [[TMP82:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP83:%.*]] = sext i8 [[TMP82]] to i64 // CHECK4-51-NEXT: [[TMP84:%.*]] = mul nsw i64 4, [[TMP1]] // CHECK4-51-NEXT: [[ARRAYIDX23:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP84]] // CHECK4-51-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX23]], i64 [[TMP83]] -// CHECK4-51-NEXT: [[TMP85:%.*]] = load i8, ptr [[B]], align 1 +// CHECK4-51-NEXT: [[TMP85:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP86:%.*]] = sext i8 [[TMP85]] to i64 // CHECK4-51-NEXT: [[TMP87:%.*]] = mul nsw i64 9, [[TMP1]] // CHECK4-51-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP87]] @@ -7838,10 +7838,10 @@ // CHECK4-51-NEXT: [[TMP111:%.*]] = mul nsw i64 0, [[TMP1]] // CHECK4-51-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP111]] // CHECK4-51-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX33]], i64 3 -// CHECK4-51-NEXT: [[TMP112:%.*]] = load i32, ptr @a, align 4 +// CHECK4-51-NEXT: [[TMP112:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP113:%.*]] = sext i32 [[TMP112]] to i64 // CHECK4-51-NEXT: [[LEN_SUB_1:%.*]] = sub nsw i64 [[TMP113]], 1 -// CHECK4-51-NEXT: [[TMP114:%.*]] = load i32, ptr @a, align 4 +// CHECK4-51-NEXT: [[TMP114:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP115:%.*]] = sext i32 [[TMP114]] to i64 // CHECK4-51-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP115]] // CHECK4-51-NEXT: [[TMP116:%.*]] = mul nsw i64 [[LB_ADD_LEN]], [[TMP1]] @@ -7875,7 +7875,7 @@ // CHECK4-51-NEXT: [[TMP132:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM44]], ptr [[TMP130]]) // CHECK4-51-NEXT: [[TMP133:%.*]] = getelementptr inbounds [[STRUCT_ANON_14]], ptr [[AGG_CAPTURED45]], i32 0, i32 0 // CHECK4-51-NEXT: store ptr [[C]], ptr [[TMP133]], align 8 -// CHECK4-51-NEXT: [[TMP134:%.*]] = load i8, ptr [[B]], align 1 +// CHECK4-51-NEXT: [[TMP134:%.*]] = load i8, ptr [[B]], align 1, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP134]], 0 // CHECK4-51-NEXT: [[TMP135:%.*]] = select i1 [[TOBOOL]], i32 2, i32 0 // CHECK4-51-NEXT: [[TMP136:%.*]] = or i32 [[TMP135]], 1 @@ -7883,7 +7883,7 @@ 
// CHECK4-51-NEXT: [[TMP137:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM46]], i32 [[TMP136]], i64 40, i64 8, ptr @.omp_task_entry..16) // CHECK4-51-NEXT: [[TMP138:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP137]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP139:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP138]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP140:%.*]] = load ptr, ptr [[TMP139]], align 8 +// CHECK4-51-NEXT: [[TMP140:%.*]] = load ptr, ptr [[TMP139]], align 8, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP140]], ptr align 8 [[AGG_CAPTURED45]], i64 8, i1 false) // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM47:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB19]]) // CHECK4-51-NEXT: [[TMP141:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM47]], ptr [[TMP137]]) @@ -7892,13 +7892,13 @@ // CHECK4-51-NEXT: [[TMP143:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19:%.*]], ptr [[TMP142]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19]], ptr [[TMP142]], i32 0, i32 2 // CHECK4-51-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_20:%.*]], ptr [[TMP144]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP146:%.*]] = load i32, ptr [[C]], align 128 +// CHECK4-51-NEXT: [[TMP146:%.*]] = load i32, ptr [[C]], align 128, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: store i32 [[TMP146]], ptr [[TMP145]], align 128 // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM50:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) // CHECK4-51-NEXT: [[TMP147:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP143]], i32 0, i32 2 // CHECK4-51-NEXT: store i32 0, ptr [[TMP147]], align 16 // CHECK4-51-NEXT: [[TMP148:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM50]], ptr [[TMP142]]) -// CHECK4-51-NEXT: [[TMP149:%.*]] = load i32, ptr @a, align 4 +// CHECK4-51-NEXT: [[TMP149:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: store i32 [[TMP149]], ptr [[RETVAL]], align 4 // CHECK4-51-NEXT: [[TMP150:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK4-51-NEXT: call void @llvm.stackrestore(ptr [[TMP150]]) @@ -7939,25 +7939,25 @@ // CHECK4-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata 
[[META10:![0-9]+]]) -// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK4-51-NEXT: store i32 15, ptr @a, align 4, !noalias !12 -// CHECK4-51-NEXT: [[TMP9:%.*]] = load i32, ptr @a, align 4, !noalias !12 +// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK4-51-NEXT: store i32 15, ptr @a, align 4, !noalias !13 +// CHECK4-51-NEXT: [[TMP9:%.*]] = load i32, ptr @a, align 4, !noalias !13, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[CONV_I:%.*]] = trunc i32 [[TMP9]] to i8 // CHECK4-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK4-51-NEXT: store i8 [[CONV_I]], ptr [[TMP10]], align 1 @@ -7980,24 +7980,24 @@ // CHECK4-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], 
align 4, !noalias !22 -// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !22 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !22 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !22 -// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !22 -// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 -// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !22 -// CHECK4-51-NEXT: store i32 15, ptr @a, align 4, !noalias !22 +// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META14:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !23 +// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !23 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !23 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !23 +// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !23 +// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !23 +// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !23 +// CHECK4-51-NEXT: store i32 15, ptr @a, align 4, !noalias !23 // CHECK4-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK4-51-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds [2 x %struct.S], ptr [[TMP9]], i64 0, i64 1 // CHECK4-51-NEXT: store i32 10, ptr [[ARRAYIDX_I]], align 4 @@ -8018,48 +8018,48 @@ // CHECK4-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META28:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]]) -// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32 -// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !32 -// CHECK4-51-NEXT: 
store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !32 -// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 -// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !32 -// CHECK4-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 -// CHECK4-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META27:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META29:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META31:![0-9]+]]) +// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !33 +// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !33 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !33 +// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 +// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !33 +// CHECK4-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 +// CHECK4-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK4-51-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK4-51-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK4-51-NEXT: ] // CHECK4-51: .untied.done..i: -// CHECK4-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK4-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK4-51-NEXT: br label [[CLEANUP_I:%.*]] // CHECK4-51: .untied.jmp..i: -// CHECK4-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !32 +// CHECK4-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !33 // CHECK4-51-NEXT: store i32 1, ptr [[TMP11]], align 4 // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7]]) -// CHECK4-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !32 +// CHECK4-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !33 // CHECK4-51-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP12]]) // CHECK4-51-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT:%.*]] // CHECK4-51: .untied.jmp.1.i: // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM2_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK4-51-NEXT: call void @__kmpc_critical(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2_I]], ptr @.gomp_critical_user_.var) -// CHECK4-51-NEXT: store i32 1, ptr @a, align 4, !noalias !32 +// CHECK4-51-NEXT: store i32 1, ptr @a, align 4, !noalias !33 // CHECK4-51-NEXT: call void @__kmpc_end_critical(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2_I]], ptr @.gomp_critical_user_.var) -// CHECK4-51-NEXT: store i32 0, 
ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK4-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK4-51-NEXT: br label [[CLEANUP_I]] // CHECK4-51: cleanup.i: -// CHECK4-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !32 +// CHECK4-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !33 // CHECK4-51-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]] // CHECK4-51: .omp_outlined..3.exit: // CHECK4-51-NEXT: ret i32 0 @@ -8079,45 +8079,45 @@ // CHECK4-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META33:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META36:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META38:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META40:![0-9]+]]) -// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !42 -// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !42 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !42 -// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !42 -// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !42 -// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !42 -// CHECK4-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 -// CHECK4-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META34:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META37:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META39:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META41:![0-9]+]]) +// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !43 +// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !43 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !43 +// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !43 +// CHECK4-51-NEXT: store ptr [[TMP7]], ptr 
[[__CONTEXT_ADDR_I]], align 8, !noalias !43 +// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !43 +// CHECK4-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 +// CHECK4-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK4-51-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK4-51-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK4-51-NEXT: ] // CHECK4-51: .untied.done..i: -// CHECK4-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK4-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK4-51-NEXT: br label [[CLEANUP_I:%.*]] // CHECK4-51: .untied.jmp..i: -// CHECK4-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !42 +// CHECK4-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !43 // CHECK4-51-NEXT: store i32 1, ptr [[TMP11]], align 4 // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9]]) -// CHECK4-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !42 +// CHECK4-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !43 // CHECK4-51-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP12]]) // CHECK4-51-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT:%.*]] // CHECK4-51: .untied.jmp.1.i: -// CHECK4-51-NEXT: store i32 1, ptr @a, align 4, !noalias !42 -// CHECK4-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK4-51-NEXT: store i32 1, ptr @a, align 4, !noalias !43 +// CHECK4-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK4-51-NEXT: br label [[CLEANUP_I]] // CHECK4-51: cleanup.i: -// CHECK4-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !42 +// CHECK4-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !43 // CHECK4-51-NEXT: br label [[DOTOMP_OUTLINED__5_EXIT]] // CHECK4-51: .omp_outlined..5.exit: // CHECK4-51-NEXT: ret i32 0 @@ -8137,45 +8137,45 @@ // CHECK4-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_7:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META43:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META46:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META48:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META50:![0-9]+]]) -// CHECK4-51-NEXT: store i32 [[TMP2]], ptr 
[[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !52 -// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !52 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !52 -// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 -// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !52 -// CHECK4-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 -// CHECK4-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4 +// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META44:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META47:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META49:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META51:![0-9]+]]) +// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !53 +// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !53 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !53 +// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 +// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !53 +// CHECK4-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 +// CHECK4-51-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: switch i32 [[TMP10]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK4-51-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK4-51-NEXT: i32 1, label [[DOTUNTIED_JMP_1_I:%.*]] // CHECK4-51-NEXT: ] // CHECK4-51: .untied.done..i: -// CHECK4-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK4-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK4-51-NEXT: br label [[CLEANUP_I:%.*]] // CHECK4-51: .untied.jmp..i: -// CHECK4-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !52 +// CHECK4-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !53 // CHECK4-51-NEXT: store i32 1, ptr [[TMP11]], align 4 // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB11]]) -// CHECK4-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !52 +// CHECK4-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !53 // CHECK4-51-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP12]]) // CHECK4-51-NEXT: br label [[DOTOMP_OUTLINED__7_EXIT:%.*]] // CHECK4-51: .untied.jmp.1.i: -// CHECK4-51-NEXT: store i32 1, ptr @a, align 4, !noalias !52 -// CHECK4-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK4-51-NEXT: store i32 1, ptr @a, align 4, !noalias !53 +// CHECK4-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], 
align 4, !noalias !53 // CHECK4-51-NEXT: br label [[CLEANUP_I]] // CHECK4-51: cleanup.i: -// CHECK4-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !52 +// CHECK4-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !53 // CHECK4-51-NEXT: br label [[DOTOMP_OUTLINED__7_EXIT]] // CHECK4-51: .omp_outlined..7.exit: // CHECK4-51-NEXT: ret i32 0 @@ -8194,24 +8194,24 @@ // CHECK4-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_9:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META53:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META56:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META58:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META60:![0-9]+]]) -// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !62 -// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !62 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !62 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !62 -// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !62 -// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !62 -// CHECK4-51-NEXT: store i32 2, ptr @a, align 4, !noalias !62 +// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META54:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META57:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META59:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META61:![0-9]+]]) +// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !63 +// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !63 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !63 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !63 +// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !63 +// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !63 +// CHECK4-51-NEXT: store i32 2, ptr @a, align 4, !noalias !63 // CHECK4-51-NEXT: ret i32 0 // 
// @@ -8228,24 +8228,24 @@ // CHECK4-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_11:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META63:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META66:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META68:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META70:![0-9]+]]) -// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !72 -// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !72 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !72 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !72 -// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !72 -// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !72 -// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !72 -// CHECK4-51-NEXT: store i32 2, ptr @a, align 4, !noalias !72 +// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META64:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META67:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META69:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META71:![0-9]+]]) +// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !73 +// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !73 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !73 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !73 +// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !73 +// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !73 +// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !73 +// CHECK4-51-NEXT: store i32 2, ptr @a, align 4, !noalias !73 // CHECK4-51-NEXT: ret i32 0 // // @@ -8262,24 +8262,24 @@ // CHECK4-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP3:%.*]] = load 
ptr, ptr [[DOTADDR1]], align 8 // CHECK4-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_13:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META73:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META76:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META78:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META80:![0-9]+]]) -// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !82 -// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !82 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !82 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !82 -// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !82 -// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !82 -// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !82 -// CHECK4-51-NEXT: store i32 3, ptr @a, align 4, !noalias !82 +// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META74:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META77:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META79:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META81:![0-9]+]]) +// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !83 +// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !83 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !83 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !83 +// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !83 +// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !83 +// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !83 +// CHECK4-51-NEXT: store i32 3, ptr @a, align 4, !noalias !83 // CHECK4-51-NEXT: ret i32 0 // // @@ -8296,24 +8296,24 @@ // CHECK4-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP7:%.*]] = load 
ptr, ptr [[TMP6]], align 8 -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META83:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META86:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META88:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META90:![0-9]+]]) -// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !92 -// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !92 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !92 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !92 -// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !92 -// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !92 -// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !92 -// CHECK4-51-NEXT: store i32 4, ptr @a, align 4, !noalias !92 +// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META84:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META87:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META89:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META91:![0-9]+]]) +// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !93 +// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !93 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !93 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !93 +// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !93 +// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !93 +// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !93 +// CHECK4-51-NEXT: store i32 4, ptr @a, align 4, !noalias !93 // CHECK4-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 // CHECK4-51-NEXT: store i32 5, ptr [[TMP9]], align 128 // CHECK4-51-NEXT: ret i32 0 @@ -8347,30 +8347,30 @@ // CHECK4-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_18:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_18]], ptr [[TMP3]], i32 0, i32 2 -// CHECK4-51-NEXT: call void 
@llvm.experimental.noalias.scope.decl(metadata [[META93:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META96:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META98:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META100:![0-9]+]]) -// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !102 -// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !102 -// CHECK4-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !102 -// CHECK4-51-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !102 -// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !102 -// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !102 -// CHECK4-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !102 -// CHECK4-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !102 -// CHECK4-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !102 +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META94:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META97:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META99:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META101:![0-9]+]]) +// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !103 +// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !103 +// CHECK4-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !103 +// CHECK4-51-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !103 +// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !103 +// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !103 +// CHECK4-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !103 +// CHECK4-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !103 +// CHECK4-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !103 // CHECK4-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4]] -// CHECK4-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !102 +// CHECK4-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !103 // CHECK4-51-NEXT: store i32 4, ptr [[TMP12]], align 128 -// CHECK4-51-NEXT: store i32 4, ptr @a, align 4, !noalias !102 +// CHECK4-51-NEXT: store i32 4, ptr @a, align 4, !noalias !103 // CHECK4-51-NEXT: ret i32 0 // // @@ -8428,32 +8428,32 @@ // CHECK4-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19:%.*]], ptr [[TMP3]], i32 0, i32 0 // 
CHECK4-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128 +// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 128, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19]], ptr [[TMP3]], i32 0, i32 2 -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META103:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META106:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META108:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META110:![0-9]+]]) -// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !112 -// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 -// CHECK4-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !112 -// CHECK4-51-NEXT: store ptr @.omp_task_privates_map..20, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !112 -// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 -// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !112 -// CHECK4-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !112 -// CHECK4-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !112 -// CHECK4-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !112 +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META104:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META107:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META109:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META111:![0-9]+]]) +// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !113 +// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 +// CHECK4-51-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !113 +// CHECK4-51-NEXT: store ptr @.omp_task_privates_map..20, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !113 +// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 +// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !113 +// CHECK4-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !113 +// CHECK4-51-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !113 +// CHECK4-51-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !113 // CHECK4-51-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTLOCAL_PTR_ADDR_I]], ptr [[DOTLOCAL_PTR_ADDR1_I]]) #[[ATTR4]] -// CHECK4-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !112 -// CHECK4-51-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR_I]], align 8, !noalias !112 -// CHECK4-51-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR1_I]], align 8, !noalias !112 -// CHECK4-51-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 -// 
CHECK4-51-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4 +// CHECK4-51-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !113 +// CHECK4-51-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR_I]], align 8, !noalias !113 +// CHECK4-51-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTLOCAL_PTR_ADDR1_I]], align 8, !noalias !113 +// CHECK4-51-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 +// CHECK4-51-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: switch i32 [[TMP16]], label [[DOTUNTIED_DONE__I:%.*]] [ // CHECK4-51-NEXT: i32 0, label [[DOTUNTIED_JMP__I:%.*]] // CHECK4-51-NEXT: i32 1, label [[DOTUNTIED_JMP_2_I:%.*]] @@ -8462,61 +8462,61 @@ // CHECK4-51-NEXT: i32 4, label [[DOTUNTIED_JMP_15_I:%.*]] // CHECK4-51-NEXT: ] // CHECK4-51: .untied.done..i: -// CHECK4-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK4-51-NEXT: store i32 1, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !113 // CHECK4-51-NEXT: br label [[CLEANUP_I:%.*]] // CHECK4-51: .untied.jmp..i: -// CHECK4-51-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK4-51-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK4-51-NEXT: store i32 1, ptr [[TMP17]], align 4 // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) -// CHECK4-51-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK4-51-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK4-51-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM_I]], ptr [[TMP18]]) // CHECK4-51-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT:%.*]] // CHECK4-51: .untied.jmp.2.i: // CHECK4-51-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S1_I]]) // CHECK4-51-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S2_I]]) -// CHECK4-51-NEXT: store i32 0, ptr [[S2_I]], align 4, !noalias !112 +// CHECK4-51-NEXT: store i32 0, ptr [[S2_I]], align 4, !noalias !113 // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM3_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB23:[0-9]+]]) // CHECK4-51-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM3_I]], i32 1, i64 256, i64 1, ptr @.omp_task_entry..19) // CHECK4-51-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_18:%.*]], ptr [[TMP20]], i32 0, i32 2 -// CHECK4-51-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP12]], align 128 +// CHECK4-51-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP12]], align 128, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: store i32 [[TMP22]], ptr [[TMP21]], align 128 // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM4_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB23]]) // CHECK4-51-NEXT: [[TMP23:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM4_I]], ptr [[TMP20]]) -// CHECK4-51-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK4-51-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK4-51-NEXT: store i32 2, ptr [[TMP24]], align 4 // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM5_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) -// CHECK4-51-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// 
CHECK4-51-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK4-51-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM5_I]], ptr [[TMP25]]) // CHECK4-51-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK4-51: .untied.jmp.6.i: // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM8_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK4-51-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_omp_taskyield(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM8_I]], i32 0) -// CHECK4-51-NEXT: [[TMP28:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK4-51-NEXT: [[TMP28:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK4-51-NEXT: store i32 3, ptr [[TMP28]], align 4 // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM9_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) -// CHECK4-51-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK4-51-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK4-51-NEXT: [[TMP30:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM9_I]], ptr [[TMP29]]) // CHECK4-51-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK4-51: .untied.jmp.10.i: // CHECK4-51-NEXT: call void @_ZN1SC1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[REF_TMP_I]]) -// CHECK4-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[S1_I]], ptr align 4 [[REF_TMP_I]], i64 4, i1 false), !noalias !112 +// CHECK4-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[S1_I]], ptr align 4 [[REF_TMP_I]], i64 4, i1 false), !noalias !113 // CHECK4-51-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[REF_TMP_I]]) #[[ATTR4]] -// CHECK4-51-NEXT: store i32 10, ptr [[S2_I]], align 4, !noalias !112 +// CHECK4-51-NEXT: store i32 10, ptr [[S2_I]], align 4, !noalias !113 // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM13_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK4-51-NEXT: [[TMP31:%.*]] = call i32 @__kmpc_omp_taskwait(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM13_I]]) -// CHECK4-51-NEXT: [[TMP32:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !112 +// CHECK4-51-NEXT: [[TMP32:%.*]] = load ptr, ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !113 // CHECK4-51-NEXT: store i32 4, ptr [[TMP32]], align 4 // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM14_I:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB21]]) -// CHECK4-51-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !112 +// CHECK4-51-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !113 // CHECK4-51-NEXT: [[TMP34:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM14_I]], ptr [[TMP33]]) // CHECK4-51-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK4-51: .untied.jmp.15.i: // CHECK4-51-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S2_I]]) #[[ATTR4]] // CHECK4-51-NEXT: call void @_ZN1SD1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[S1_I]]) #[[ATTR4]] -// CHECK4-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK4-51-NEXT: store i32 0, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !113 // CHECK4-51-NEXT: br label [[CLEANUP_I]] // CHECK4-51: cleanup.i: -// CHECK4-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], align 4, !noalias !112 +// CHECK4-51-NEXT: [[CLEANUP_DEST_I:%.*]] = load i32, ptr [[CLEANUP_DEST_SLOT_I]], 
align 4, !noalias !113 // CHECK4-51-NEXT: br label [[DOTOMP_OUTLINED__17_EXIT]] // CHECK4-51: .omp_outlined..17.exit: // CHECK4-51-NEXT: ret i32 0 @@ -8582,7 +8582,7 @@ // CHECK4-51-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..23) // CHECK4-51-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_22:%.*]], ptr [[TMP1]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP2]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 8 +// CHECK4-51-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP4]], ptr align 8 [[AGG_CAPTURED]], i64 8, i1 false) // CHECK4-51-NEXT: [[OMP_GLOBAL_THREAD_NUM2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB25]]) // CHECK4-51-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM2]], ptr [[TMP1]]) @@ -8602,24 +8602,24 @@ // CHECK4-51-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK4-51-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK4-51-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK4-51-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK4-51-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_22:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK4-51-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK4-51-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META113:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META116:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META118:![0-9]+]]) -// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META120:![0-9]+]]) -// CHECK4-51-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !122 -// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !122 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !122 -// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !122 -// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !122 -// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !122 -// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !122 -// CHECK4-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK4-51-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META114:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META117:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META119:![0-9]+]]) +// CHECK4-51-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META121:![0-9]+]]) +// CHECK4-51-NEXT: store i32 [[TMP2]], ptr 
[[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !123 +// CHECK4-51-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !123 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !123 +// CHECK4-51-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !123 +// CHECK4-51-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !123 +// CHECK4-51-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !123 +// CHECK4-51-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !123 +// CHECK4-51-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8, !noundef [[NOUNDEF3]] // CHECK4-51-NEXT: store i32 0, ptr [[TMP9]], align 4 // CHECK4-51-NEXT: ret i32 0 // diff --git a/clang/test/OpenMP/task_if_codegen.cpp b/clang/test/OpenMP/task_if_codegen.cpp --- a/clang/test/OpenMP/task_if_codegen.cpp +++ b/clang/test/OpenMP/task_if_codegen.cpp @@ -103,11 +103,11 @@ // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP1]], i32 1, i64 40, i64 1, ptr @.omp_task_entry.) -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP2]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP2]], i32 0, i32 0 // CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP2]]) -// CHECK1-NEXT: [[TMP5:%.*]] = call i32 @.omp_task_entry.(i32 [[TMP1]], ptr [[TMP2]]) #[[ATTR3:[0-9]+]] +// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @.omp_task_entry.(i32 [[TMP1]], ptr [[TMP2]]) #[[ATTR3:[0-9]+]] // CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP1]], ptr [[TMP2]]) // CHECK1-NEXT: ret void // @@ -125,23 +125,23 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias 
!12 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 // CHECK1-NEXT: call void @_Z9gtid_testv() // CHECK1-NEXT: ret i32 0 // @@ -159,53 +159,53 @@ // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..3) -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP1]]) -// CHECK1-NEXT: [[TMP5:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..5) -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP5]], i32 0, i32 0 -// CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP5]]) -// CHECK1-NEXT: [[TMP8:%.*]] = call i32 @.omp_task_entry..5(i32 [[TMP0]], ptr [[TMP5]]) #[[ATTR3]] -// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP5]]) -// CHECK1-NEXT: [[TMP9:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..7) -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP9]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr @Arg, align 4 -// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP1]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP1]]) +// CHECK1-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..5) +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP4]], i32 0, i32 0 +// 
CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP4]]) +// CHECK1-NEXT: [[TMP6:%.*]] = call i32 @.omp_task_entry..5(i32 [[TMP0]], ptr [[TMP4]]) #[[ATTR3]] +// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP4]]) +// CHECK1-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..7) +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP7]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP9]]) +// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP7]]) // CHECK1-NEXT: br label [[OMP_IF_END:%.*]] // CHECK1: omp_if.else: -// CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP9]]) -// CHECK1-NEXT: [[TMP14:%.*]] = call i32 @.omp_task_entry..7(i32 [[TMP0]], ptr [[TMP9]]) #[[ATTR3]] -// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP9]]) +// CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP7]]) +// CHECK1-NEXT: [[TMP11:%.*]] = call i32 @.omp_task_entry..7(i32 [[TMP0]], ptr [[TMP7]]) #[[ATTR3]] +// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP7]]) // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP15:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..9) -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_7:%.*]], ptr [[TMP15]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], ptr [[TMP18]], i64 0 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP19]], i32 0, i32 0 -// CHECK1-NEXT: store i64 ptrtoint (ptr @Arg to i64), ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP19]], i32 0, i32 1 -// CHECK1-NEXT: store i64 4, ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP19]], i32 0, i32 2 -// CHECK1-NEXT: store i8 3, ptr [[TMP22]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..9) +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_7:%.*]], ptr [[TMP12]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], ptr [[TMP14]], i64 0 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP15]], i32 0, i32 0 +// CHECK1-NEXT: store i64 ptrtoint (ptr @Arg to i64), ptr [[TMP16]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP15]], i32 0, 
i32 1 +// CHECK1-NEXT: store i64 4, ptr [[TMP17]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP15]], i32 0, i32 2 +// CHECK1-NEXT: store i8 3, ptr [[TMP18]], align 8 // CHECK1-NEXT: store i64 1, ptr [[DEP_COUNTER_ADDR]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr @Arg, align 4 -// CHECK1-NEXT: [[TOBOOL4:%.*]] = icmp ne i32 [[TMP24]], 0 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TOBOOL4:%.*]] = icmp ne i32 [[TMP19]], 0 // CHECK1-NEXT: br i1 [[TOBOOL4]], label [[OMP_IF_THEN5:%.*]], label [[OMP_IF_ELSE6:%.*]] // CHECK1: omp_if.then5: -// CHECK1-NEXT: [[TMP25:%.*]] = call i32 @__kmpc_omp_task_with_deps(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP15]], i32 1, ptr [[TMP18]], i32 0, ptr null) +// CHECK1-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_omp_task_with_deps(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP12]], i32 1, ptr [[TMP14]], i32 0, ptr null) // CHECK1-NEXT: br label [[OMP_IF_END7:%.*]] // CHECK1: omp_if.else6: -// CHECK1-NEXT: call void @__kmpc_omp_wait_deps(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, ptr [[TMP18]], i32 0, ptr null) -// CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP15]]) -// CHECK1-NEXT: [[TMP26:%.*]] = call i32 @.omp_task_entry..9(i32 [[TMP0]], ptr [[TMP15]]) #[[ATTR3]] -// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP15]]) +// CHECK1-NEXT: call void @__kmpc_omp_wait_deps(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, ptr [[TMP14]], i32 0, ptr null) +// CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP12]]) +// CHECK1-NEXT: [[TMP21:%.*]] = call i32 @.omp_task_entry..9(i32 [[TMP0]], ptr [[TMP12]]) #[[ATTR3]] +// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP12]]) // CHECK1-NEXT: br label [[OMP_IF_END7]] // CHECK1: omp_if.end7: -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr @Arg, align 4 -// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP27]]) +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr @Arg, align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP22]]) // CHECK1-NEXT: ret i32 [[CALL]] // // @@ -222,23 +222,23 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_1:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr 
[[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !24 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 // CHECK1-NEXT: call void @_Z3fn7v() #[[ATTR3]] // CHECK1-NEXT: ret i32 0 // @@ -256,23 +256,23 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_3:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META25:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META28:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META32:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !34 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !34 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !34 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !34 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !34 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !34 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !34 +// CHECK1-NEXT: [[TMP7:%.*]] = load 
ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META29:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META31:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META33:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !35 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !35 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !35 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !35 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !35 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !35 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !35 // CHECK1-NEXT: call void @_Z3fn8v() #[[ATTR3]] // CHECK1-NEXT: ret i32 0 // @@ -290,23 +290,23 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_5:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META35:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META38:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META40:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META42:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !44 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !44 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !44 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !44 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !44 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !44 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !44 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META36:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META39:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META41:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META43:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !45 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !45 
+// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !45 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !45 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !45 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !45 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !45 // CHECK1-NEXT: call void @_Z3fn9v() #[[ATTR3]] // CHECK1-NEXT: ret i32 0 // @@ -324,23 +324,23 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_7:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META45:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META48:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META50:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META52:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !54 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !54 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !54 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !54 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !54 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !54 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !54 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META46:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META49:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META51:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META53:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !55 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !55 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !55 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !55 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !55 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !55 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !55 // CHECK1-NEXT: call void @_Z4fn10v() #[[ATTR3]] // CHECK1-NEXT: ret i32 0 // @@ -364,100 +364,100 @@ // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr 
@[[GLOB1]]) // CHECK1-NEXT: store i32 [[ARG]], ptr [[ARG_ADDR]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..11) -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_9:%.*]], ptr [[TMP1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP1]]) -// CHECK1-NEXT: [[TMP5:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..13) -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_11:%.*]], ptr [[TMP5]], i32 0, i32 0 -// CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP5]]) -// CHECK1-NEXT: [[TMP8:%.*]] = call i32 @.omp_task_entry..13(i32 [[TMP0]], ptr [[TMP5]]) #[[ATTR3]] -// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP5]]) -// CHECK1-NEXT: [[TMP9:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..15) -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_13:%.*]], ptr [[TMP9]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 -// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_9:%.*]], ptr [[TMP1]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP1]]) +// CHECK1-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..13) +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_11:%.*]], ptr [[TMP4]], i32 0, i32 0 +// CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP4]]) +// CHECK1-NEXT: [[TMP6:%.*]] = call i32 @.omp_task_entry..13(i32 [[TMP0]], ptr [[TMP4]]) #[[ATTR3]] +// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP4]]) +// CHECK1-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..15) +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_13:%.*]], ptr [[TMP7]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: -// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP9]]) +// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP7]]) // CHECK1-NEXT: br label [[OMP_IF_END:%.*]] // CHECK1: omp_if.else: -// CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP9]]) -// CHECK1-NEXT: [[TMP14:%.*]] = call i32 @.omp_task_entry..15(i32 [[TMP0]], ptr [[TMP9]]) #[[ATTR3]] -// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP9]]) +// CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP7]]) +// CHECK1-NEXT: [[TMP11:%.*]] = call i32 @.omp_task_entry..15(i32 [[TMP0]], ptr [[TMP7]]) #[[ATTR3]] +// CHECK1-NEXT: call void 
@__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP7]]) // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP15:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..17) -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP15]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP19:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], ptr [[TMP18]], i64 0 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP20]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP19]], ptr [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP20]], i32 0, i32 1 -// CHECK1-NEXT: store i64 4, ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP20]], i32 0, i32 2 -// CHECK1-NEXT: store i8 1, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..17) +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP12]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO:%.*]], ptr [[TMP14]], i64 0 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP16]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP15]], ptr [[TMP17]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP16]], i32 0, i32 1 +// CHECK1-NEXT: store i64 4, ptr [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP16]], i32 0, i32 2 +// CHECK1-NEXT: store i8 1, ptr [[TMP19]], align 8 // CHECK1-NEXT: store i64 1, ptr [[DEP_COUNTER_ADDR]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 -// CHECK1-NEXT: [[TOBOOL4:%.*]] = icmp ne i32 [[TMP25]], 0 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TOBOOL4:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK1-NEXT: br i1 [[TOBOOL4]], label [[OMP_IF_THEN5:%.*]], label [[OMP_IF_ELSE6:%.*]] // CHECK1: omp_if.then5: -// CHECK1-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_omp_task_with_deps(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP15]], i32 1, ptr [[TMP18]], i32 0, ptr null) +// CHECK1-NEXT: [[TMP21:%.*]] = call i32 @__kmpc_omp_task_with_deps(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP12]], i32 1, ptr [[TMP14]], i32 0, ptr null) // CHECK1-NEXT: br label [[OMP_IF_END7:%.*]] // CHECK1: omp_if.else6: -// CHECK1-NEXT: call void @__kmpc_omp_wait_deps(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, ptr [[TMP18]], i32 0, ptr null) -// CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP15]]) -// CHECK1-NEXT: [[TMP27:%.*]] = call i32 @.omp_task_entry..17(i32 [[TMP0]], ptr [[TMP15]]) #[[ATTR3]] -// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP15]]) +// CHECK1-NEXT: call void 
@__kmpc_omp_wait_deps(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, ptr [[TMP14]], i32 0, ptr null) +// CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP12]]) +// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @.omp_task_entry..17(i32 [[TMP0]], ptr [[TMP12]]) #[[ATTR3]] +// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP12]]) // CHECK1-NEXT: br label [[OMP_IF_END7]] // CHECK1: omp_if.end7: -// CHECK1-NEXT: [[TMP28:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..19) -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_17:%.*]], ptr [[TMP28]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR9]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP32:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP31]], i64 0 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP33]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP32]], ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP33]], i32 0, i32 1 -// CHECK1-NEXT: store i64 4, ptr [[TMP35]], align 8 -// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP33]], i32 0, i32 2 -// CHECK1-NEXT: store i8 3, ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..19) +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_17:%.*]], ptr [[TMP23]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [1 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR9]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP26:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP25]], i64 0 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP27]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP26]], ptr [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP27]], i32 0, i32 1 +// CHECK1-NEXT: store i64 4, ptr [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP27]], i32 0, i32 2 +// CHECK1-NEXT: store i8 3, ptr [[TMP30]], align 8 // CHECK1-NEXT: store i64 1, ptr [[DEP_COUNTER_ADDR10]], align 8 -// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 -// CHECK1-NEXT: [[TOBOOL11:%.*]] = icmp ne i32 [[TMP38]], 0 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TOBOOL11:%.*]] = icmp ne i32 [[TMP31]], 0 // CHECK1-NEXT: br i1 [[TOBOOL11]], label [[OMP_IF_THEN12:%.*]], label [[OMP_IF_ELSE13:%.*]] // CHECK1: omp_if.then12: -// CHECK1-NEXT: [[TMP39:%.*]] = call i32 @__kmpc_omp_task_with_deps(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP28]], i32 1, ptr [[TMP31]], i32 0, ptr null) +// CHECK1-NEXT: [[TMP32:%.*]] = call i32 @__kmpc_omp_task_with_deps(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP23]], i32 1, ptr [[TMP25]], i32 0, ptr null) // CHECK1-NEXT: br label [[OMP_IF_END14:%.*]] // CHECK1: omp_if.else13: -// CHECK1-NEXT: call void @__kmpc_omp_wait_deps(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, ptr [[TMP31]], i32 0, ptr null) -// 
CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP28]]) -// CHECK1-NEXT: [[TMP40:%.*]] = call i32 @.omp_task_entry..19(i32 [[TMP0]], ptr [[TMP28]]) #[[ATTR3]] -// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP28]]) +// CHECK1-NEXT: call void @__kmpc_omp_wait_deps(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, ptr [[TMP25]], i32 0, ptr null) +// CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP23]]) +// CHECK1-NEXT: [[TMP33:%.*]] = call i32 @.omp_task_entry..19(i32 [[TMP0]], ptr [[TMP23]]) #[[ATTR3]] +// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP23]]) // CHECK1-NEXT: br label [[OMP_IF_END14]] // CHECK1: omp_if.end14: -// CHECK1-NEXT: [[TMP41:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..21) -// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19:%.*]], ptr [[TMP41]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR16]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64 -// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP44]], i64 0 -// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP46]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP45]], ptr [[TMP47]], align 8 -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP46]], i32 0, i32 1 -// CHECK1-NEXT: store i64 4, ptr [[TMP48]], align 8 -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP46]], i32 0, i32 2 -// CHECK1-NEXT: store i8 3, ptr [[TMP49]], align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 1, ptr @.omp_task_entry..21) +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19:%.*]], ptr [[TMP34]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [1 x %struct.kmp_depend_info], ptr [[DOTDEP_ARR_ADDR16]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP37:%.*]] = ptrtoint ptr [[ARG_ADDR]] to i64 +// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP36]], i64 0 +// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP38]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP37]], ptr [[TMP39]], align 8 +// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP38]], i32 0, i32 1 +// CHECK1-NEXT: store i64 4, ptr [[TMP40]], align 8 +// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_DEPEND_INFO]], ptr [[TMP38]], i32 0, i32 2 +// CHECK1-NEXT: store i8 3, ptr [[TMP41]], align 8 // CHECK1-NEXT: store i64 1, ptr [[DEP_COUNTER_ADDR17]], align 8 -// CHECK1-NEXT: [[TMP51:%.*]] = load i32, ptr [[ARG_ADDR]], align 4 -// CHECK1-NEXT: [[TOBOOL18:%.*]] = icmp ne i32 [[TMP51]], 0 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[ARG_ADDR]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TOBOOL18:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK1-NEXT: br i1 [[TOBOOL18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE20:%.*]] // CHECK1: omp_if.then19: -// CHECK1-NEXT: [[TMP52:%.*]] = call i32 @__kmpc_omp_task_with_deps(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP41]], i32 1, ptr [[TMP44]], i32 0, ptr null) +// CHECK1-NEXT: 
[[TMP43:%.*]] = call i32 @__kmpc_omp_task_with_deps(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP34]], i32 1, ptr [[TMP36]], i32 0, ptr null) // CHECK1-NEXT: br label [[OMP_IF_END21:%.*]] // CHECK1: omp_if.else20: -// CHECK1-NEXT: call void @__kmpc_omp_wait_deps(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, ptr [[TMP44]], i32 0, ptr null) -// CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP41]]) -// CHECK1-NEXT: [[TMP53:%.*]] = call i32 @.omp_task_entry..21(i32 [[TMP0]], ptr [[TMP41]]) #[[ATTR3]] -// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP41]]) +// CHECK1-NEXT: call void @__kmpc_omp_wait_deps(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, ptr [[TMP36]], i32 0, ptr null) +// CHECK1-NEXT: call void @__kmpc_omp_task_begin_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP34]]) +// CHECK1-NEXT: [[TMP44:%.*]] = call i32 @.omp_task_entry..21(i32 [[TMP0]], ptr [[TMP34]]) #[[ATTR3]] +// CHECK1-NEXT: call void @__kmpc_omp_task_complete_if0(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP34]]) // CHECK1-NEXT: br label [[OMP_IF_END21]] // CHECK1: omp_if.end21: // CHECK1-NEXT: ret i32 0 @@ -476,23 +476,23 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_9:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META55:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META58:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META60:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META62:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !64 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !64 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !64 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !64 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !64 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !64 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !64 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META56:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META59:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META61:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META63:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !65 +// CHECK1-NEXT: store ptr [[TMP5]], ptr 
[[DOTPART_ID__ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !65 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !65 // CHECK1-NEXT: call void @_Z3fn1v() #[[ATTR3]] // CHECK1-NEXT: ret i32 0 // @@ -510,23 +510,23 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_11:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META65:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META68:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META70:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META72:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !74 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !74 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !74 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !74 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !74 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !74 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !74 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META66:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META69:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META71:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META73:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !75 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !75 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !75 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !75 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !75 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !75 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !75 // CHECK1-NEXT: call void @_Z3fn2v() #[[ATTR3]] // CHECK1-NEXT: ret i32 0 // @@ -544,23 +544,23 @@ // CHECK1-NEXT: 
[[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_13:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META75:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META78:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META80:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META82:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !84 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !84 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !84 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !84 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !84 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !84 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !84 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META76:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META79:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META81:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META83:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !85 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !85 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !85 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !85 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !85 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !85 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !85 // CHECK1-NEXT: call void @_Z3fn3v() #[[ATTR3]] // CHECK1-NEXT: ret i32 0 // @@ -578,23 +578,23 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_15:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr 
[[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META85:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META88:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META90:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META92:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !94 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !94 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !94 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !94 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !94 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !94 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !94 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META86:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META89:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META91:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META93:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !95 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !95 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !95 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !95 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !95 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !95 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !95 // CHECK1-NEXT: call void @_Z3fn4v() #[[ATTR3]] // CHECK1-NEXT: ret i32 0 // @@ -612,23 +612,23 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_17:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META95:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META98:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META100:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META102:![0-9]+]]) -// CHECK1-NEXT: store i32 
[[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !104 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !104 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !104 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !104 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !104 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !104 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !104 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META96:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META99:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META101:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META103:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !105 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !105 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !105 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !105 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !105 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !105 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !105 // CHECK1-NEXT: call void @_Z3fn5v() #[[ATTR3]] // CHECK1-NEXT: ret i32 0 // @@ -646,23 +646,23 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_19:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META105:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META108:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META110:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META112:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !114 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !114 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !114 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !114 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !114 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !114 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias 
!114 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META106:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META109:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META111:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META113:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !115 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !115 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !115 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !115 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !115 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !115 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !115 // CHECK1-NEXT: call void @_Z3fn6v() #[[ATTR3]] // CHECK1-NEXT: ret i32 0 // diff --git a/clang/test/OpenMP/task_in_reduction_codegen.cpp b/clang/test/OpenMP/task_in_reduction_codegen.cpp --- a/clang/test/OpenMP/task_in_reduction_codegen.cpp +++ b/clang/test/OpenMP/task_in_reduction_codegen.cpp @@ -83,7 +83,7 @@ // CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq ptr [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] // CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] // CHECK1: arrayctor.cont: -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 @@ -180,7 +180,7 @@ // CHECK1-NEXT: [[TMP44:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 40, i64 8, ptr @.omp_task_entry..11) // CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], ptr [[TMP44]], i32 0, i32 0 // CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP45]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP47:%.*]] = load ptr, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = load ptr, ptr [[TMP46]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP47]], ptr align 8 [[AGG_CAPTURED]], i64 8, i1 false) // CHECK1-NEXT: [[TMP48:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP44]]) // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 @@ -231,8 +231,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -259,8 +259,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: 
[[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load float, ptr [[TMP2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load float, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP4]], [[TMP5]] // CHECK1-NEXT: store float [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -287,8 +287,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -407,7 +407,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP2]], ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i16, ptr [[TMP3]], i64 [[TMP5]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP3]], [[TMP6]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -430,7 +430,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP2]], ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr i16, ptr [[TMP5]], i64 [[TMP4]] @@ -439,9 +439,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP6]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2 +// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 -// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2 +// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i16 [[TMP9]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 @@ 
-473,7 +473,7 @@ // CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[DOTTASK_RED__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTTASK_RED_1]], ptr [[DOTTASK_RED__ADDR2]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTTASK_RED__ADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTTASK_RED__ADDR2]], align 8 @@ -488,18 +488,18 @@ // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 4 // CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP9]], align 8 // CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP11]], i32 1, i64 56, i64 40, ptr @.omp_task_entry.) // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP12]], i32 0, i32 0 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP13]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[AGG_CAPTURED]], i64 40, i1 false) // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP12]], i32 0, i32 1 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP16]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP17]], align 8 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP16]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP4]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP19]], align 8 // CHECK1-NEXT: [[TMP21:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP11]], ptr [[TMP12]]) // CHECK1-NEXT: ret void @@ -539,34 +539,34 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 // CHECK1-NEXT: call void [[TMP12]](ptr [[TMP13]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3]] -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !13 // CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 +// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: 
[[TMP18:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP19:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP18]], ptr [[TMP17]], ptr [[TMP16]]) // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 // CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP20]], align 8 @@ -574,14 +574,14 @@ // CHECK1-NEXT: [[TMP23:%.*]] = udiv exact i64 [[TMP22]], ptrtoint (ptr getelementptr (i16, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP24:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP18]], ptr @{{reduction_size[.].+[.]}}) // CHECK1-NEXT: store i64 [[TMP23]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP26:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP18]], ptr [[TMP25]], ptr [[TMP21]]) -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP19]], align 4 +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM_I:%.*]] = sext i32 [[TMP27]] to i64 // CHECK1-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds i16, ptr [[TMP26]], i64 [[IDXPROM_I]] -// CHECK1-NEXT: [[TMP28:%.*]] = load i16, ptr [[ARRAYIDX_I]], align 2 +// CHECK1-NEXT: [[TMP28:%.*]] = load i16, ptr [[ARRAYIDX_I]], align 2, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = sext i16 [[TMP28]] to i32 -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP19]], align 4 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD_I:%.*]] = add nsw i32 [[TMP29]], [[CONV_I]] // CHECK1-NEXT: store i32 [[ADD_I]], ptr [[TMP19]], align 4 // CHECK1-NEXT: ret i32 0 @@ -600,27 +600,27 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !24 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias 
!24 -// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !24 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr null, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr null, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 +// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !25 // CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !24 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP11:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP10]], ptr null, ptr [[TMP9]]) -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[INC_I:%.*]] = add nsw i32 [[TMP12]], 1 // CHECK1-NEXT: store i32 [[INC_I]], ptr [[TMP11]], align 4 // CHECK1-NEXT: ret i32 0 diff --git a/clang/test/OpenMP/task_member_call_codegen.cpp b/clang/test/OpenMP/task_member_call_codegen.cpp --- a/clang/test/OpenMP/task_member_call_codegen.cpp +++ b/clang/test/OpenMP/task_member_call_codegen.cpp @@ -31,10 +31,10 @@ // CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]]) // CHECK1-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 48, i64 1, ptr @.omp_task_entry.) 
-// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP1]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP1]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP1]]) +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP1]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP1]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP3]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP1]]) // CHECK1-NEXT: ret void // // @@ -66,29 +66,29 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: call void [[TMP13]](ptr [[TMP14]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4:[0-9]+]] -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: call void @_ZN1a1bEv(ptr noundef nonnull align 1 dereferenceable(1) [[TMP16]]) #[[ATTR4]] +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4:[0-9]+]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: call void @_ZN1a1bEv(ptr noundef nonnull align 1 dereferenceable(1) [[TMP12]]) #[[ATTR4]] // CHECK1-NEXT: ret i32 0 // // @@ -99,11 +99,11 @@ // CHECK3-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]]) // CHECK3-NEXT: [[TMP0:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 1, i64 48, i64 1, ptr @.omp_task_entry.) 
-// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP0]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP0]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP0]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP2]], i32 0, i32 0 // CHECK3-NEXT: [[OMP_GLOBAL_THREAD_NUM1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3]]) -// CHECK3-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM1]], ptr [[TMP0]]) +// CHECK3-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM1]], ptr [[TMP0]]) // CHECK3-NEXT: ret void // // @@ -135,28 +135,28 @@ // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK3-NEXT: store ptr [[TMP9]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK3-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK3-NEXT: call void [[TMP13]](ptr [[TMP14]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4:[0-9]+]] -// CHECK3-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK3-NEXT: call void @_ZN1a1bEv(ptr noundef nonnull align 1 dereferenceable(1) [[TMP16]]) #[[ATTR4]] +// CHECK3-NEXT: 
[[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4:[0-9]+]] +// CHECK3-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK3-NEXT: call void @_ZN1a1bEv(ptr noundef nonnull align 1 dereferenceable(1) [[TMP12]]) #[[ATTR4]] // CHECK3-NEXT: ret i32 0 // diff --git a/clang/test/OpenMP/task_target_device_codegen.c b/clang/test/OpenMP/task_target_device_codegen.c --- a/clang/test/OpenMP/task_target_device_codegen.c +++ b/clang/test/OpenMP/task_target_device_codegen.c @@ -27,12 +27,12 @@ // CHECK-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]]) // CHECK-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i64 48, i64 0, ptr @.omp_task_entry.) 
-// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP1]], i32 0, i32 0 -// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP1]], i32 0, i32 1 -// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP4]], i32 0, i32 0 -// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[T]], align 4 -// CHECK-NEXT: store i32 [[TMP6]], ptr [[TMP5]], align 8 -// CHECK-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP1]]) +// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP1]], i32 0, i32 0 +// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP1]], i32 0, i32 1 +// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP3]], i32 0, i32 0 +// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[T]], align 4, !noundef [[NOUNDEF3:![0-9]+]] +// CHECK-NEXT: store i32 [[TMP5]], ptr [[TMP4]], align 8 +// CHECK-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP0]], ptr [[TMP1]]) // CHECK-NEXT: ret void // // @@ -71,30 +71,40 @@ // CHECK-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK-NEXT: store ptr [[TMP9]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK-NEXT: call void [[TMP13]](ptr [[TMP14]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4:[0-9]+]] -// CHECK-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr 
[[TMP16]], align 4 -// CHECK-NEXT: store i32 [[TMP17]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !12 +// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 +// CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4:[0-9]+]] +// CHECK-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4, !noundef [[NOUNDEF3]] +// CHECK-NEXT: store i32 [[TMP13]], ptr [[DOTCAPTURE_EXPR__I]], align 4, !noalias !13 // CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_test_task_affinity_l18() #[[ATTR4]] // CHECK-NEXT: ret i32 0 // +// +// SIMD-ONLY0-LABEL: define {{[^@]+}}@test_task_affinity +// SIMD-ONLY0-SAME: () #[[ATTR0:[0-9]+]] { +// SIMD-ONLY0-NEXT: entry: +// SIMD-ONLY0-NEXT: [[T:%.*]] = alloca i32, align 4 +// SIMD-ONLY0-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = load i32, ptr [[T]], align 4, !noundef [[NOUNDEF2:![0-9]+]] +// SIMD-ONLY0-NEXT: store i32 [[TMP0]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// SIMD-ONLY0-NEXT: ret void +// diff --git a/clang/test/OpenMP/taskloop_in_reduction_codegen.cpp b/clang/test/OpenMP/taskloop_in_reduction_codegen.cpp --- a/clang/test/OpenMP/taskloop_in_reduction_codegen.cpp +++ b/clang/test/OpenMP/taskloop_in_reduction_codegen.cpp @@ -69,7 +69,7 @@ // CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq ptr [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] // CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] // CHECK1: arrayctor.cont: -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 @@ -209,8 +209,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4 -// 
CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -237,8 +237,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load float, ptr [[TMP2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load float, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP4]], [[TMP5]] // CHECK1-NEXT: store float [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -265,8 +265,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -385,7 +385,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP2]], ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i16, ptr [[TMP3]], i64 [[TMP5]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP3]], [[TMP6]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -408,7 +408,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP2]], ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr i16, ptr [[TMP5]], i64 [[TMP4]] @@ -417,9 +417,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP6]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2 +// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr 
[[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 -// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2 +// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i16 [[TMP9]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 @@ -452,7 +452,7 @@ // CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[DOTTASK_RED__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTTASK_RED_1]], ptr [[DOTTASK_RED__ADDR2]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTTASK_RED__ADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTTASK_RED__ADDR2]], align 8 @@ -467,19 +467,19 @@ // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 4 // CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP9]], align 8 // CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP11]]) // CHECK1-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP11]], i32 1, i64 96, i64 40, ptr @.omp_task_entry.) // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP12]], i32 0, i32 0 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP13]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[AGG_CAPTURED]], i64 40, i1 false) // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP12]], i32 0, i32 1 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP16]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP17]], align 8 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP16]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP4]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP19]], align 8 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP13]], i32 0, i32 5 // CHECK1-NEXT: store i64 0, ptr [[TMP21]], align 8 @@ -489,7 +489,7 @@ // CHECK1-NEXT: store i64 1, ptr [[TMP23]], align 8 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP13]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP24]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[TMP23]], align 8 +// CHECK1-NEXT: 
[[TMP25:%.*]] = load i64, ptr [[TMP23]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP11]], ptr [[TMP12]], i32 1, ptr [[TMP21]], ptr [[TMP22]], i64 [[TMP25]], i32 1, i32 0, i64 0, ptr null) // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP11]]) // CHECK1-NEXT: ret void @@ -536,50 +536,50 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP10]], ptr 
[[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP19]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP20]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 // CHECK1-NEXT: call void [[TMP22]](ptr [[TMP23]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3]] -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !15 // CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 
+// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP24]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP29:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP28]], ptr [[TMP27]], ptr [[TMP26]]) // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP19]], i32 0, i32 2 // CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 @@ -587,32 +587,32 @@ // CHECK1-NEXT: [[TMP33:%.*]] = udiv exact i64 [[TMP32]], ptrtoint (ptr getelementptr (i16, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP34:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP28]], ptr @{{reduction_size[.].+[.]}}) // CHECK1-NEXT: store i64 [[TMP33]], ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP36:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP28]], ptr [[TMP35]], ptr [[TMP31]]) -// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP37]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2_I:%.*]] = sext i32 [[TMP38]] to i64 -// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP39]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__9_EXIT:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 -// CHECK1-NEXT: store i32 [[TMP40]], ptr [[I_I]], align 4, !noalias !14 -// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP29]], align 4 +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP40]], ptr [[I_I]], align 4, !noalias !15 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP29]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM_I:%.*]] = sext i32 [[TMP41]] to i64 // CHECK1-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds i16, ptr [[TMP36]], i64 [[IDXPROM_I]] -// CHECK1-NEXT: [[TMP42:%.*]] = load i16, ptr [[ARRAYIDX_I]], align 2 +// CHECK1-NEXT: [[TMP42:%.*]] = load i16, ptr [[ARRAYIDX_I]], align 2, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV3_I:%.*]] = sext i16 [[TMP42]] to i32 -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[TMP29]], align 4 +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[TMP29]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD4_I:%.*]] = add nsw i32 [[TMP43]], [[CONV3_I]] // CHECK1-NEXT: store i32 [[ADD4_I]], ptr [[TMP29]], align 4 -// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: 
[[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD5_I:%.*]] = add nsw i32 [[TMP44]], 1 -// CHECK1-NEXT: store i32 [[ADD5_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 [[ADD5_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]] // CHECK1: .omp_outlined..9.exit: // CHECK1-NEXT: ret i32 0 diff --git a/clang/test/OpenMP/taskloop_simd_in_reduction_codegen.cpp b/clang/test/OpenMP/taskloop_simd_in_reduction_codegen.cpp --- a/clang/test/OpenMP/taskloop_simd_in_reduction_codegen.cpp +++ b/clang/test/OpenMP/taskloop_simd_in_reduction_codegen.cpp @@ -69,7 +69,7 @@ // CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq ptr [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] // CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] // CHECK1: arrayctor.cont: -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.stacksave() // CHECK1-NEXT: store ptr [[TMP3]], ptr [[SAVED_STACK]], align 8 @@ -209,8 +209,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -237,8 +237,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load float, ptr [[TMP2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load float, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load float, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP4]], [[TMP5]] // CHECK1-NEXT: store float [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -265,8 +265,8 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void @@ -385,7 +385,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP2]], ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: 
[[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i16, ptr [[TMP3]], i64 [[TMP5]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP3]], [[TMP6]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -408,7 +408,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP2]], ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr i16, ptr [[TMP5]], i64 [[TMP4]] @@ -417,9 +417,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP6]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2 +// CHECK1-NEXT: [[TMP8:%.*]] = load i16, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 2, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i16 [[TMP8]] to i32 -// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2 +// CHECK1-NEXT: [[TMP9:%.*]] = load i16, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 2, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i16 [[TMP9]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 @@ -452,7 +452,7 @@ // CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[DOTTASK_RED__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTTASK_RED_1]], ptr [[DOTTASK_RED__ADDR2]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[D_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTTASK_RED__ADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTTASK_RED__ADDR2]], align 8 @@ -467,19 +467,19 @@ // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 4 // CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP9]], align 8 // CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskgroup(ptr @[[GLOB1]], i32 [[TMP11]]) // CHECK1-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP11]], i32 1, i64 96, i64 40, ptr @.omp_task_entry.) 
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP12]], i32 0, i32 0 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP13]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP15]], ptr align 8 [[AGG_CAPTURED]], i64 40, i1 false) // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP12]], i32 0, i32 1 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP16]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store ptr [[TMP18]], ptr [[TMP17]], align 8 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], ptr [[TMP16]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP4]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP19]], align 8 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP13]], i32 0, i32 5 // CHECK1-NEXT: store i64 0, ptr [[TMP21]], align 8 @@ -489,7 +489,7 @@ // CHECK1-NEXT: store i64 1, ptr [[TMP23]], align 8 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP13]], i32 0, i32 9 // CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP24]], i8 0, i64 8, i1 false) -// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[TMP23]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @__kmpc_taskloop(ptr @[[GLOB1]], i32 [[TMP11]], ptr [[TMP12]], i32 1, ptr [[TMP21]], ptr [[TMP22]], i64 [[TMP25]], i32 1, i32 0, i64 0, ptr null) // CHECK1-NEXT: call void @__kmpc_end_taskgroup(ptr @[[GLOB1]], i32 [[TMP11]]) // CHECK1-NEXT: ret void @@ -536,50 +536,50 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 5 -// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP9]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 6 -// CHECK1-NEXT: 
[[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP11]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP15]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP10]], ptr [[DOTLB__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTUB__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i64 
[[TMP14]], ptr [[DOTST__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store i32 [[TMP16]], ptr [[DOTLITER__ADDR_I]], align 4, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP18]], ptr [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !15 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP19]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP20]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !15 // CHECK1-NEXT: call void [[TMP22]](ptr [[TMP23]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3]] -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !15 +// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !15 // CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14 +// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP24]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP29:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP28]], ptr [[TMP27]], ptr [[TMP26]]) // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP19]], i32 0, i32 2 // CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 @@ -587,33 +587,33 @@ // CHECK1-NEXT: [[TMP33:%.*]] = udiv exact i64 [[TMP32]], ptrtoint (ptr getelementptr (i16, ptr null, i32 1) to i64) // CHECK1-NEXT: [[TMP34:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB1]], i32 [[TMP28]], ptr @{{reduction_size[.].+[.]}}) // CHECK1-NEXT: store i64 [[TMP33]], ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP36:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP28]], ptr [[TMP35]], ptr [[TMP31]]) -// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !14 +// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTLB__ADDR_I]], align 8, !noalias !15, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV_I:%.*]] = trunc i64 [[TMP37]] to i32 -// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14 +// CHECK1-NEXT: store i32 [[CONV_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I:%.*]] // CHECK1: omp.inner.for.cond.i: -// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15:![0-9]+]] +// CHECK1-NEXT: 
[[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2_I:%.*]] = sext i32 [[TMP38]] to i64 -// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTUB__ADDR_I]], align 8, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP39]] // CHECK1-NEXT: br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__9_EXIT:%.*]] // CHECK1: omp.inner.for.body.i: -// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: store i32 [[TMP40]], ptr [[I_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP29]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i32 [[TMP40]], ptr [[I_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP29]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[IDXPROM_I:%.*]] = sext i32 [[TMP41]] to i64 // CHECK1-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds i16, ptr [[TMP36]], i64 [[IDXPROM_I]] -// CHECK1-NEXT: [[TMP42:%.*]] = load i16, ptr [[ARRAYIDX_I]], align 2, !llvm.access.group [[ACC_GRP15]] +// CHECK1-NEXT: [[TMP42:%.*]] = load i16, ptr [[ARRAYIDX_I]], align 2, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV3_I:%.*]] = sext i16 [[TMP42]] to i32 -// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[TMP29]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[TMP29]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD4_I:%.*]] = add nsw i32 [[TMP43]], [[CONV3_I]] -// CHECK1-NEXT: store i32 [[ADD4_I]], ptr [[TMP29]], align 4, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] +// CHECK1-NEXT: store i32 [[ADD4_I]], ptr [[TMP29]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ADD5_I:%.*]] = add nsw i32 [[TMP44]], 1 -// CHECK1-NEXT: store i32 [[ADD5_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD5_I]], ptr [[DOTOMP_IV_I]], align 4, !noalias !15, !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK1: .omp_outlined..9.exit: // CHECK1-NEXT: ret i32 0 // @@ -693,7 +693,7 @@ // CHECK3-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq ptr [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] // CHECK3-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] // CHECK3: arrayctor.cont: -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK3-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // 
CHECK3-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK3-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 @@ -701,37 +701,37 @@ // CHECK3-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 // CHECK3-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK3-NEXT: store i64 4, ptr [[DOTOMP_UB]], align 8 -// CHECK3-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK3-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP3]] to i32 // CHECK3-NEXT: store i32 [[CONV]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CONV2:%.*]] = sext i32 [[TMP4]] to i64 -// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP2]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP5]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP6]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP7]] to i64 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[VLA]], i64 [[IDXPROM]] -// CHECK3-NEXT: [[TMP8:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP2]] +// CHECK3-NEXT: [[TMP8:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CONV3:%.*]] = sext i16 [[TMP8]] to i32 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[A]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], [[CONV3]] -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[A]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK3-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// 
CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: store i32 5, ptr [[I]], align 4 // CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_reduction_task_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_reduction_task_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_parallel_for_reduction_task_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_parallel_for_reduction_task_codegen.cpp @@ -44,10 +44,10 @@ // CHECK1-NEXT: [[ARGC_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[ARGC_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARGC_CASTED]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[ARGC_CASTED]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l14(i64 [[TMP1]], ptr [[TMP2]]) #[[ATTR6:[0-9]+]] // CHECK1-NEXT: ret i32 0 // @@ -59,7 +59,7 @@ // CHECK1-NEXT: [[ARGV_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i64 [[ARGC]], ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB1:[0-9]+]], i32 2, ptr @.omp_outlined., ptr [[ARGC_ADDR]], ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -94,14 +94,14 @@ // CHECK1-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP1]], i64 0 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 0 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64 // CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP4]] -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds ptr, ptr [[TMP5]], i64 9 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[ARRAYIDX3]], align 8 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP6]], i64 [[LB_ADD_LEN]] @@ -137,178 +137,178 @@ // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 0 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP22]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 -// CHECK1-NEXT: store i64 4, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 -// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP30]], i8 0, i64 4, i1 false) +// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP23]], align 8 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 +// CHECK1-NEXT: store i64 4, ptr [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init., ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr 
[[DOTRD_INPUT_GEP_]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb., ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 +// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP28]], i8 0, i64 4, i1 false) // CHECK1-NEXT: [[DOTRD_INPUT_GEP_6:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t], ptr [[DOTRD_INPUT_]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds ptr, ptr [[TMP33]], i64 0 -// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARRAYIDX7]], align 8 -// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP34]], i64 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP36]] -// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr [[TMP37]], i64 9 -// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[ARRAYIDX10]], align 8 -// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[TMP38]], i64 [[LB_ADD_LEN9]] -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[ARRAYIDX11]] to i64 -// CHECK1-NEXT: [[TMP41:%.*]] = ptrtoint ptr [[ARRAYIDX8]] to i64 -// CHECK1-NEXT: [[TMP42:%.*]] = sub i64 [[TMP40]], [[TMP41]] -// CHECK1-NEXT: [[TMP43:%.*]] = sdiv exact i64 [[TMP42]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP44:%.*]] = add nuw i64 [[TMP43]], 1 -// CHECK1-NEXT: [[TMP45:%.*]] = mul nuw i64 [[TMP44]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 2 -// CHECK1-NEXT: store i64 [[TMP45]], ptr [[TMP46]], align 8 -// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP47]], align 8 -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP48]], align 8 -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP49]], align 8 -// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 6 -// CHECK1-NEXT: store i32 1, ptr [[TMP50]], align 8 -// CHECK1-NEXT: [[TMP51:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[TMP51]], align 4 -// CHECK1-NEXT: [[TMP54:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP52]], i32 1, i32 2, ptr 
[[DOTRD_INPUT_]]) -// CHECK1-NEXT: store ptr [[TMP54]], ptr [[DOTTASK_RED_]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP30:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds ptr, ptr [[TMP30]], i64 0 +// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[ARRAYIDX7]], align 8 +// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[TMP31]], i64 0 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP33:%.*]] = sext i32 [[TMP32]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN9:%.*]] = add nsw i64 -1, [[TMP33]] +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr [[TMP34]], i64 9 +// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[ARRAYIDX10]], align 8 +// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i8, ptr [[TMP35]], i64 [[LB_ADD_LEN9]] +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARRAYIDX8]], ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP37:%.*]] = ptrtoint ptr [[ARRAYIDX11]] to i64 +// CHECK1-NEXT: [[TMP38:%.*]] = ptrtoint ptr [[ARRAYIDX8]] to i64 +// CHECK1-NEXT: [[TMP39:%.*]] = sub i64 [[TMP37]], [[TMP38]] +// CHECK1-NEXT: [[TMP40:%.*]] = sdiv exact i64 [[TMP39]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP41:%.*]] = add nuw i64 [[TMP40]], 1 +// CHECK1-NEXT: [[TMP42:%.*]] = mul nuw i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 2 +// CHECK1-NEXT: store i64 [[TMP42]], ptr [[TMP43]], align 8 +// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..1, ptr [[TMP44]], align 8 +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..2, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], ptr [[DOTRD_INPUT_GEP_6]], i32 0, i32 6 +// CHECK1-NEXT: store i32 1, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP48]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP50:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP49]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) +// CHECK1-NEXT: store ptr [[TMP50]], ptr [[DOTTASK_RED_]], align 8 // CHECK1-NEXT: store i64 0, ptr [[DOTOMP_COMB_LB]], align 8 // CHECK1-NEXT: store i64 9, ptr [[DOTOMP_COMB_UB]], align 8 // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 -// CHECK1-NEXT: [[TMP55:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP56:%.*]] = load i32, ptr [[TMP55]], align 4 
-// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB2:[0-9]+]], i32 [[TMP56]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP57]], 9 +// CHECK1-NEXT: [[TMP51:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[TMP51]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB2:[0-9]+]], i32 [[TMP52]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) +// CHECK1-NEXT: [[TMP53:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP53]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 +// CHECK1-NEXT: [[TMP54:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP58]], [[COND_FALSE]] ] +// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP54]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8 -// CHECK1-NEXT: store i64 [[TMP59]], ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i64 [[TMP55]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP61:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP60]], [[TMP61]] +// CHECK1-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP56]], [[TMP57]] // CHECK1-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP62:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8 -// CHECK1-NEXT: [[TMP63:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK1-NEXT: [[TMP64:%.*]] = load ptr, ptr [[TMP]], align 8 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB1]], i32 4, ptr @.omp_outlined..3, i64 [[TMP62]], i64 [[TMP63]], ptr [[ARGC1]], ptr [[TMP64]]) +// CHECK1-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8 +// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 +// CHECK1-NEXT: [[TMP60:%.*]] = load ptr, ptr [[TMP]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB1]], i32 4, ptr @.omp_outlined..3, i64 [[TMP58]], i64 [[TMP59]], ptr [[ARGC1]], ptr [[TMP60]]) // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP65:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP66:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP65]], [[TMP66]] +// CHECK1-NEXT: [[TMP61:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP62:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP61]], [[TMP62]] // CHECK1-NEXT: store i64 [[ADD]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: -// CHECK1-NEXT: [[TMP67:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP68:%.*]] = load i32, ptr [[TMP67]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP68]]) -// CHECK1-NEXT: [[TMP69:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP70:%.*]] = load i32, ptr [[TMP69]], align 4 -// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP70]], i32 1) -// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP71]], align 8 -// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP73]], align 8 -// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP75:%.*]] = inttoptr i64 [[TMP11]] to ptr -// CHECK1-NEXT: store ptr [[TMP75]], ptr [[TMP74]], align 8 -// CHECK1-NEXT: [[TMP76:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP77:%.*]] = load i32, ptr [[TMP76]], align 4 -// CHECK1-NEXT: [[TMP79:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB4:[0-9]+]], i32 [[TMP77]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.9, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP79]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP63:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP64:%.*]] = load i32, ptr [[TMP63]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP64]]) +// CHECK1-NEXT: [[TMP65:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP66:%.*]] = load i32, ptr [[TMP65]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP66]], i32 1) +// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP67]], align 8 +// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP68]], align 8 +// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 +// CHECK1-NEXT: [[TMP70:%.*]] = inttoptr i64 [[TMP11]] to ptr +// CHECK1-NEXT: store ptr [[TMP70]], ptr [[TMP69]], align 8 +// CHECK1-NEXT: [[TMP71:%.*]] = load 
ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP72:%.*]] = load i32, ptr [[TMP71]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP73:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB4:[0-9]+]], i32 [[TMP72]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.9, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP73]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP80:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP80]], [[TMP81]] +// CHECK1-NEXT: [[TMP74:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP75:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP74]], [[TMP75]] // CHECK1-NEXT: store i32 [[ADD14]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP82]] +// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP76]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE21:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST15:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT19:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP83:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST15]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP83]] to i32 -// CHECK1-NEXT: [[TMP84:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV16:%.*]] = sext i8 [[TMP84]] to i32 +// CHECK1-NEXT: [[TMP77:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST15]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP77]] to i32 +// CHECK1-NEXT: [[TMP78:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV16:%.*]] = sext i8 [[TMP78]] to i32 // CHECK1-NEXT: [[ADD17:%.*]] = add nsw i32 [[CONV]], [[CONV16]] // CHECK1-NEXT: [[CONV18:%.*]] = trunc i32 [[ADD17]] to i8 // CHECK1-NEXT: store i8 [[CONV18]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST15]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT19]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST15]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT19]], [[TMP82]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT19]], [[TMP76]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE20]], label [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done21: -// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB4]], i32 [[TMP77]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB4]], i32 [[TMP72]], ptr 
@.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP86:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP85]] monotonic, align 4 -// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY22:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP87]] +// CHECK1-NEXT: [[TMP79:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP80:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP79]] monotonic, align 4 +// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP11]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY22:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP81]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY22]], label [[OMP_ARRAYCPY_DONE35:%.*]], label [[OMP_ARRAYCPY_BODY23:%.*]] // CHECK1: omp.arraycpy.body23: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST24:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT33:%.*]], [[ATOMIC_EXIT:%.*]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST25:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT32:%.*]], [[ATOMIC_EXIT]] ] -// CHECK1-NEXT: [[TMP88:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST24]], align 1 -// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP88]] to i32 +// CHECK1-NEXT: [[TMP82:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST24]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV26:%.*]] = sext i8 [[TMP82]] to i32 // CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST25]] monotonic, align 1 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] // CHECK1: atomic_cont: -// CHECK1-NEXT: [[TMP89:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY23]] ], [ [[TMP94:%.*]], [[ATOMIC_CONT]] ] -// CHECK1-NEXT: store i8 [[TMP89]], ptr [[_TMP27]], align 1 -// CHECK1-NEXT: [[TMP90:%.*]] = load i8, ptr [[_TMP27]], align 1 -// CHECK1-NEXT: [[CONV28:%.*]] = sext i8 [[TMP90]] to i32 -// CHECK1-NEXT: [[TMP91:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST24]], align 1 -// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP91]] to i32 +// CHECK1-NEXT: [[TMP83:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY23]] ], [ [[TMP88:%.*]], [[ATOMIC_CONT]] ] +// CHECK1-NEXT: store i8 [[TMP83]], ptr [[_TMP27]], align 1 +// CHECK1-NEXT: [[TMP84:%.*]] = load i8, ptr [[_TMP27]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV28:%.*]] = sext i8 [[TMP84]] to i32 +// CHECK1-NEXT: [[TMP85:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST24]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP85]] to i32 // CHECK1-NEXT: [[ADD30:%.*]] = add nsw i32 [[CONV28]], [[CONV29]] // CHECK1-NEXT: [[CONV31:%.*]] = trunc i32 [[ADD30]] to i8 // CHECK1-NEXT: store i8 [[CONV31]], ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP92:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP93:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST25]], i8 [[TMP89]], i8 [[TMP92]] monotonic monotonic, align 1 -// CHECK1-NEXT: [[TMP94]] = extractvalue { i8, i1 } [[TMP93]], 0 -// CHECK1-NEXT: [[TMP95:%.*]] = extractvalue { i8, i1 } [[TMP93]], 1 -// CHECK1-NEXT: br i1 [[TMP95]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] +// CHECK1-NEXT: [[TMP86:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 +// CHECK1-NEXT: [[TMP87:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST25]], i8 [[TMP83]], i8 [[TMP86]] monotonic 
monotonic, align 1 +// CHECK1-NEXT: [[TMP88]] = extractvalue { i8, i1 } [[TMP87]], 0 +// CHECK1-NEXT: [[TMP89:%.*]] = extractvalue { i8, i1 } [[TMP87]], 1 +// CHECK1-NEXT: br i1 [[TMP89]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] // CHECK1: atomic_exit: // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT32]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST25]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT33]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST24]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE34:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT32]], [[TMP87]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE34:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT32]], [[TMP81]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE34]], label [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_BODY23]] // CHECK1: omp.arraycpy.done35: // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: -// CHECK1-NEXT: [[TMP96:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP96]]) +// CHECK1-NEXT: [[TMP90:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP90]]) // CHECK1-NEXT: ret void // // @@ -319,8 +319,8 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -331,12 +331,12 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP5]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -349,7 +349,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP4]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP2]], [[TMP5]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -371,7 +371,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: 
[[TMP2:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]] @@ -380,9 +380,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP8]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8 @@ -431,21 +431,21 @@ // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[ARGC_ADDR]], align 8 // CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 // CHECK1-NEXT: store i64 9, ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: store i64 [[TMP1]], ptr [[DOTOMP_LB]], align 8 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[DOTOMP_UB]], align 8 // CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store i32 0, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[TMP3]], i64 0 // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYIDX]], align 8 // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 0 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP6:%.*]] = sext i32 [[TMP5]] to i64 // CHECK1-NEXT: [[LB_ADD_LEN:%.*]] = add nsw i64 -1, [[TMP6]] -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds ptr, ptr [[TMP7]], i64 9 // CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYIDX3]], align 8 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i64 [[LB_ADD_LEN]] @@ -481,198 +481,198 @@ // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t.0], ptr [[DOTRD_INPUT_]], i64 0, i64 0 // CHECK1-NEXT: [[TMP24:%.*]] = 
getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0:%.*]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 -// CHECK1-NEXT: store i64 4, ptr [[TMP28]], align 8 -// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..4, ptr [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb..5, ptr [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 -// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP32]], i8 0, i64 4, i1 false) +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP25]], align 8 +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 2 +// CHECK1-NEXT: store i64 4, ptr [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..4, ptr [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..5, ptr [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_]], i32 0, i32 6 +// CHECK1-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP30]], i8 0, i64 4, i1 false) // CHECK1-NEXT: [[DOTRD_INPUT_GEP_7:%.*]] = getelementptr inbounds [2 x %struct.kmp_taskred_input_t.0], ptr [[DOTRD_INPUT_]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP35:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds ptr, ptr [[TMP35]], i64 0 -// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[ARRAYIDX8]], align 8 -// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, ptr [[TMP36]], i64 0 -// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP38:%.*]] = sext i32 [[TMP37]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP38]] -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8 -// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds ptr, ptr [[TMP39]], i64 9 -// CHECK1-NEXT: [[TMP40:%.*]] = load ptr, ptr [[ARRAYIDX11]], align 8 -// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, 
ptr [[TMP40]], i64 [[LB_ADD_LEN10]] -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP34]], align 8 -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARRAYIDX9]], ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP42:%.*]] = ptrtoint ptr [[ARRAYIDX12]] to i64 -// CHECK1-NEXT: [[TMP43:%.*]] = ptrtoint ptr [[ARRAYIDX9]] to i64 -// CHECK1-NEXT: [[TMP44:%.*]] = sub i64 [[TMP42]], [[TMP43]] -// CHECK1-NEXT: [[TMP45:%.*]] = sdiv exact i64 [[TMP44]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP46:%.*]] = add nuw i64 [[TMP45]], 1 -// CHECK1-NEXT: [[TMP47:%.*]] = mul nuw i64 [[TMP46]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 2 -// CHECK1-NEXT: store i64 [[TMP47]], ptr [[TMP48]], align 8 -// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 3 -// CHECK1-NEXT: store ptr @.red_init..6, ptr [[TMP49]], align 8 -// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 4 -// CHECK1-NEXT: store ptr null, ptr [[TMP50]], align 8 -// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.red_comb..7, ptr [[TMP51]], align 8 -// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 6 -// CHECK1-NEXT: store i32 1, ptr [[TMP52]], align 8 +// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds ptr, ptr [[TMP32]], i64 0 +// CHECK1-NEXT: [[TMP33:%.*]] = load ptr, ptr [[ARRAYIDX8]], align 8 +// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i8, ptr [[TMP33]], i64 0 +// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = sext i32 [[TMP34]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN10:%.*]] = add nsw i64 -1, [[TMP35]] +// CHECK1-NEXT: [[TMP36:%.*]] = load ptr, ptr [[ARGV_ADDR]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds ptr, ptr [[TMP36]], i64 9 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[ARRAYIDX11]], align 8 +// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i8, ptr [[TMP37]], i64 [[LB_ADD_LEN10]] +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP31]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARRAYIDX9]], ptr [[TMP38]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[ARRAYIDX12]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[ARRAYIDX9]] to i64 +// CHECK1-NEXT: [[TMP41:%.*]] = sub i64 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: [[TMP42:%.*]] = sdiv exact i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = add nuw i64 [[TMP42]], 1 +// CHECK1-NEXT: [[TMP44:%.*]] = mul nuw i64 [[TMP43]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 2 +// CHECK1-NEXT: store i64 [[TMP44]], ptr [[TMP45]], align 8 +// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 3 +// CHECK1-NEXT: store ptr @.red_init..6, ptr [[TMP46]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 4 +// CHECK1-NEXT: store ptr null, ptr [[TMP47]], align 8 +// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.red_comb..7, ptr [[TMP48]], align 8 +// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T_0]], ptr [[DOTRD_INPUT_GEP_7]], i32 0, i32 6 +// CHECK1-NEXT: store i32 1, ptr [[TMP49]], align 8 +// CHECK1-NEXT: [[TMP50:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP51:%.*]] = load i32, ptr [[TMP50]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP52:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP51]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) +// CHECK1-NEXT: store ptr [[TMP52]], ptr [[DOTTASK_RED_]], align 8 // CHECK1-NEXT: [[TMP53:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[TMP53]], align 4 -// CHECK1-NEXT: [[TMP56:%.*]] = call ptr @__kmpc_taskred_modifier_init(ptr @[[GLOB1]], i32 [[TMP54]], i32 1, i32 2, ptr [[DOTRD_INPUT_]]) -// CHECK1-NEXT: store ptr [[TMP56]], ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: [[TMP57:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP58:%.*]] = load i32, ptr [[TMP57]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB3:[0-9]+]], i32 [[TMP58]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP59]], 9 +// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[TMP53]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB3:[0-9]+]], i32 [[TMP54]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) +// CHECK1-NEXT: [[TMP55:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP55]], 9 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP56:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: -// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP60]], [[COND_FALSE]] ] +// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP56]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[TMP61:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 -// CHECK1-NEXT: store i64 [[TMP61]], ptr [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP57:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store i64 [[TMP57]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// 
CHECK1-NEXT: [[TMP62:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[TMP63:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP62]], [[TMP63]] +// CHECK1-NEXT: [[TMP58:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP59:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CMP13:%.*]] = icmp sle i64 [[TMP58]], [[TMP59]] // CHECK1-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] // CHECK1: omp.inner.for.cond.cleanup: // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP64:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP64]], 1 +// CHECK1-NEXT: [[TMP60:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP60]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL]] // CHECK1-NEXT: store i64 [[ADD]], ptr [[I]], align 8 -// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP65]], align 8 -// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP66]], align 8 -// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP68:%.*]] = load ptr, ptr [[_TMP5]], align 8 -// CHECK1-NEXT: store ptr [[TMP68]], ptr [[TMP67]], align 8 -// CHECK1-NEXT: [[TMP69:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP70:%.*]] = load i32, ptr [[TMP69]], align 4 -// CHECK1-NEXT: [[TMP71:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP70]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
-// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP71]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP73]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP75:%.*]] = load ptr, ptr [[TMP74]], align 8 -// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP75]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) -// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP71]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP77]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP79:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8 -// CHECK1-NEXT: store ptr [[TMP79]], ptr [[TMP78]], align 8 -// CHECK1-NEXT: [[TMP80:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP80]], align 4 -// CHECK1-NEXT: [[TMP82:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP81]], ptr [[TMP71]]) +// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[DOTTASK_RED_]], ptr [[TMP61]], align 8 +// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 1 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP62]], align 8 +// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[AGG_CAPTURED]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP64:%.*]] = load ptr, ptr [[_TMP5]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP64]], ptr [[TMP63]], align 8 +// CHECK1-NEXT: [[TMP65:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP66:%.*]] = load i32, ptr [[TMP65]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP67:%.*]] = call ptr @__kmpc_omp_task_alloc(ptr @[[GLOB1]], i32 [[TMP66]], i32 1, i64 48, i64 24, ptr @.omp_task_entry.) 
+// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP67]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP68]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP70:%.*]] = load ptr, ptr [[TMP69]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[TMP70]], ptr align 8 [[AGG_CAPTURED]], i64 24, i1 false) +// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP67]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], ptr [[TMP71]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP73:%.*]] = load ptr, ptr [[DOTTASK_RED_]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: store ptr [[TMP73]], ptr [[TMP72]], align 8 +// CHECK1-NEXT: [[TMP74:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP75:%.*]] = load i32, ptr [[TMP74]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP76:%.*]] = call i32 @__kmpc_omp_task(ptr @[[GLOB1]], i32 [[TMP75]], ptr [[TMP67]]) // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP83:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8 -// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP83]], 1 +// CHECK1-NEXT: [[TMP77:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP77]], 1 // CHECK1-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: -// CHECK1-NEXT: [[TMP84:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP85:%.*]] = load i32, ptr [[TMP84]], align 4 -// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP85]]) +// CHECK1-NEXT: [[TMP78:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP79:%.*]] = load i32, ptr [[TMP78]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP79]]) +// CHECK1-NEXT: [[TMP80:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP80]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP81]], i32 1) +// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP82]], align 8 +// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 +// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP83]], align 8 +// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 +// CHECK1-NEXT: [[TMP85:%.*]] = inttoptr i64 [[TMP13]] to ptr +// CHECK1-NEXT: store ptr [[TMP85]], ptr [[TMP84]], align 8 // CHECK1-NEXT: [[TMP86:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[TMP86]], align 4 -// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(ptr @[[GLOB1]], i32 [[TMP87]], i32 1) -// CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 -// CHECK1-NEXT: store ptr [[ARGC1]], ptr [[TMP88]], align 8 -// CHECK1-NEXT: 
[[TMP90:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 -// CHECK1-NEXT: store ptr [[VLA]], ptr [[TMP90]], align 8 -// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP92:%.*]] = inttoptr i64 [[TMP13]] to ptr -// CHECK1-NEXT: store ptr [[TMP92]], ptr [[TMP91]], align 8 -// CHECK1-NEXT: [[TMP93:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP94:%.*]] = load i32, ptr [[TMP93]], align 4 -// CHECK1-NEXT: [[TMP96:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB4]], i32 [[TMP94]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP96]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP87:%.*]] = load i32, ptr [[TMP86]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP88:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB4]], i32 [[TMP87]], i32 2, i64 24, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP88]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP97:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP98:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP97]], [[TMP98]] +// CHECK1-NEXT: [[TMP89:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP90:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP89]], [[TMP90]] // CHECK1-NEXT: store i32 [[ADD15]], ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP13]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP99]] +// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP13]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP91]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE22:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST16:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT20:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP100:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP100]] to i32 -// CHECK1-NEXT: [[TMP101:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP101]] to i32 +// CHECK1-NEXT: [[TMP92:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP92]] to i32 +// CHECK1-NEXT: [[TMP93:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV17:%.*]] = sext i8 [[TMP93]] to i32 // CHECK1-NEXT: [[ADD18:%.*]] = add nsw i32 [[CONV]], [[CONV17]] // CHECK1-NEXT: [[CONV19:%.*]] = trunc i32 [[ADD18]] to i8 // CHECK1-NEXT: store i8 [[CONV19]], ptr 
[[OMP_ARRAYCPY_DESTELEMENTPAST16]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT20]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST16]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP99]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE21:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT20]], [[TMP91]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_DONE22]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done22: -// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB4]], i32 [[TMP94]], ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB4]], i32 [[TMP87]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP102:%.*]] = load i32, ptr [[ARGC1]], align 4 -// CHECK1-NEXT: [[TMP103:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP102]] monotonic, align 4 -// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP13]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP104]] +// CHECK1-NEXT: [[TMP94:%.*]] = load i32, ptr [[ARGC1]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP95:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP94]] monotonic, align 4 +// CHECK1-NEXT: [[TMP96:%.*]] = getelementptr i8, ptr [[ARRAYIDX2]], i64 [[TMP13]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY23:%.*]] = icmp eq ptr [[ARRAYIDX2]], [[TMP96]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY23]], label [[OMP_ARRAYCPY_DONE36:%.*]], label [[OMP_ARRAYCPY_BODY24:%.*]] // CHECK1: omp.arraycpy.body24: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST25:%.*]] = phi ptr [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT34:%.*]], [[ATOMIC_EXIT:%.*]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST26:%.*]] = phi ptr [ [[ARRAYIDX2]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT33:%.*]], [[ATOMIC_EXIT]] ] -// CHECK1-NEXT: [[TMP105:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1 -// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP105]] to i32 +// CHECK1-NEXT: [[TMP97:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV27:%.*]] = sext i8 [[TMP97]] to i32 // CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]] monotonic, align 1 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] // CHECK1: atomic_cont: -// CHECK1-NEXT: [[TMP106:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP111:%.*]], [[ATOMIC_CONT]] ] -// CHECK1-NEXT: store i8 [[TMP106]], ptr [[_TMP28]], align 1 -// CHECK1-NEXT: [[TMP107:%.*]] = load i8, ptr [[_TMP28]], align 1 -// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP107]] to i32 -// CHECK1-NEXT: [[TMP108:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1 -// CHECK1-NEXT: [[CONV30:%.*]] = sext i8 [[TMP108]] to i32 +// CHECK1-NEXT: [[TMP98:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY24]] ], [ [[TMP103:%.*]], [[ATOMIC_CONT]] ] +// CHECK1-NEXT: store i8 [[TMP98]], ptr [[_TMP28]], align 1 +// CHECK1-NEXT: [[TMP99:%.*]] = load i8, ptr [[_TMP28]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV29:%.*]] = sext i8 [[TMP99]] to i32 +// CHECK1-NEXT: [[TMP100:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: 
[[CONV30:%.*]] = sext i8 [[TMP100]] to i32 // CHECK1-NEXT: [[ADD31:%.*]] = add nsw i32 [[CONV29]], [[CONV30]] // CHECK1-NEXT: [[CONV32:%.*]] = trunc i32 [[ADD31]] to i8 // CHECK1-NEXT: store i8 [[CONV32]], ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP109:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 -// CHECK1-NEXT: [[TMP110:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP106]], i8 [[TMP109]] monotonic monotonic, align 1 -// CHECK1-NEXT: [[TMP111]] = extractvalue { i8, i1 } [[TMP110]], 0 -// CHECK1-NEXT: [[TMP112:%.*]] = extractvalue { i8, i1 } [[TMP110]], 1 -// CHECK1-NEXT: br i1 [[TMP112]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] +// CHECK1-NEXT: [[TMP101:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1 +// CHECK1-NEXT: [[TMP102:%.*]] = cmpxchg ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i8 [[TMP98]], i8 [[TMP101]] monotonic monotonic, align 1 +// CHECK1-NEXT: [[TMP103]] = extractvalue { i8, i1 } [[TMP102]], 0 +// CHECK1-NEXT: [[TMP104:%.*]] = extractvalue { i8, i1 } [[TMP102]], 1 +// CHECK1-NEXT: br i1 [[TMP104]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] // CHECK1: atomic_exit: // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT33]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST26]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT34]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST25]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP104]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE35:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT33]], [[TMP96]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_DONE36]], label [[OMP_ARRAYCPY_BODY24]] // CHECK1: omp.arraycpy.done36: // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: -// CHECK1-NEXT: [[TMP113:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP113]]) +// CHECK1-NEXT: [[TMP105:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(ptr [[TMP105]]) // CHECK1-NEXT: ret void // // @@ -683,8 +683,8 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: store i32 0, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: store i32 0, ptr [[TMP2]], align 4 // CHECK1-NEXT: ret void // // @@ -695,12 +695,12 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP3]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP5]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP7]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP5]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP2]], 
align 4 // CHECK1-NEXT: ret void // // @@ -713,7 +713,7 @@ // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP2]], i64 [[TMP4]] // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq ptr [[TMP2]], [[TMP5]] // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] @@ -735,7 +735,7 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = call ptr @llvm.threadlocal.address.p0(ptr @{{reduction_size[.].+[.]}}) -// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP2]], align 8, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP4]], i64 [[TMP3]] @@ -744,9 +744,9 @@ // CHECK1: omp.arraycpy.body: // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP7]] to i32 -// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 +// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP8]] to i32 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i8 @@ -789,65 +789,65 @@ // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store i32 [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], ptr [[TMP3]], i32 0, i32 0 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], ptr [[TMP4]], i32 0, i32 2 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], ptr [[TMP4]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) -// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) -// CHECK1-NEXT: call void 
@llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) -// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: call void [[TMP13]](ptr [[TMP14]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6]] -// CHECK1-NEXT: [[TMP16:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 -// CHECK1-NEXT: [[TMP22:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP19]], ptr [[TMP18]]) -// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP24:%.*]] = load ptr, ptr [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP25:%.*]] = load ptr, ptr [[TMP24]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = sext i32 [[TMP28]] to i64 -// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP29]] -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP31:%.*]] = load ptr, ptr [[TMP30]], align 8 -// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP31]], i64 9 -// CHECK1-NEXT: [[TMP32:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 -// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP32]], i64 [[LB_ADD_LEN_I]] -// CHECK1-NEXT: [[TMP33:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 -// CHECK1-NEXT: [[TMP34:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP35:%.*]] = sub i64 [[TMP33]], [[TMP34]] -// CHECK1-NEXT: [[TMP36:%.*]] = sdiv exact i64 [[TMP35]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP37:%.*]] = add nuw i64 [[TMP36]], 1 -// CHECK1-NEXT: [[TMP38:%.*]] = mul nuw i64 [[TMP37]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: store i64 [[TMP37]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !12 -// CHECK1-NEXT: [[TMP39:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP20]], ptr [[TMP39]], ptr [[TMP25]]) -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP12]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[TMP41]], align 8 -// CHECK1-NEXT: [[TMP43:%.*]] = load ptr, ptr [[TMP42]], align 8 -// 
CHECK1-NEXT: [[TMP44:%.*]] = ptrtoint ptr [[TMP43]] to i64 -// CHECK1-NEXT: [[TMP45:%.*]] = ptrtoint ptr [[TMP25]] to i64 -// CHECK1-NEXT: [[TMP46:%.*]] = sub i64 [[TMP44]], [[TMP45]] -// CHECK1-NEXT: [[TMP47:%.*]] = sdiv exact i64 [[TMP46]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) -// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr i8, ptr [[TMP40]], i64 [[TMP47]] -// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !12 -// CHECK1-NEXT: store ptr [[TMP48]], ptr [[TMP4_I]], align 8, !noalias !12 +// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], ptr [[TMP3]], i32 0, i32 1 +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META4:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]]) +// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]]) +// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[DOTPART_ID__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP8]], ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr @.omp_task_privates_map., ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[DOTTASK_T__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP7]], ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[__CONTEXT_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[DOTPRIVATES__ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: call void [[TMP10]](ptr [[TMP11]], ptr [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR6]] +// CHECK1-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !13 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !13, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP15]], ptr [[TMP14]]) +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP24:%.*]] = sext i32 [[TMP23]] to i64 +// CHECK1-NEXT: [[LB_ADD_LEN_I:%.*]] = add nsw i64 -1, [[TMP24]] +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP26:%.*]] = load ptr, ptr [[TMP25]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ARRAYIDX2_I:%.*]] = getelementptr inbounds ptr, ptr [[TMP26]], i64 9 +// CHECK1-NEXT: 
[[TMP27:%.*]] = load ptr, ptr [[ARRAYIDX2_I]], align 8 +// CHECK1-NEXT: [[ARRAYIDX3_I:%.*]] = getelementptr inbounds i8, ptr [[TMP27]], i64 [[LB_ADD_LEN_I]] +// CHECK1-NEXT: [[TMP28:%.*]] = ptrtoint ptr [[ARRAYIDX3_I]] to i64 +// CHECK1-NEXT: [[TMP29:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP30:%.*]] = sub i64 [[TMP28]], [[TMP29]] +// CHECK1-NEXT: [[TMP31:%.*]] = sdiv exact i64 [[TMP30]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP32:%.*]] = add nuw i64 [[TMP31]], 1 +// CHECK1-NEXT: [[TMP33:%.*]] = mul nuw i64 [[TMP32]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: store i64 [[TMP32]], ptr @{{reduction_size[.].+[.]}}, align 8, !noalias !13 +// CHECK1-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP12]], align 8, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP35:%.*]] = call ptr @__kmpc_task_reduction_get_th_data(i32 [[TMP16]], ptr [[TMP34]], ptr [[TMP20]]) +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], ptr [[TMP9]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP37:%.*]] = load ptr, ptr [[TMP36]], align 8 +// CHECK1-NEXT: [[TMP38:%.*]] = load ptr, ptr [[TMP37]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = ptrtoint ptr [[TMP38]] to i64 +// CHECK1-NEXT: [[TMP40:%.*]] = ptrtoint ptr [[TMP20]] to i64 +// CHECK1-NEXT: [[TMP41:%.*]] = sub i64 [[TMP39]], [[TMP40]] +// CHECK1-NEXT: [[TMP42:%.*]] = sdiv exact i64 [[TMP41]], ptrtoint (ptr getelementptr (i8, ptr null, i32 1) to i64) +// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[TMP35]], i64 [[TMP42]] +// CHECK1-NEXT: store ptr [[TMP4_I]], ptr [[TMP_I]], align 8, !noalias !13 +// CHECK1-NEXT: store ptr [[TMP43]], ptr [[TMP4_I]], align 8, !noalias !13 // CHECK1-NEXT: ret i32 0 // // @@ -859,38 +859,38 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 // CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// 
CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP15]], i64 [[TMP18]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP15]], [[TMP21]] +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[TMP14]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP11]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE5:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: -// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP15]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP23]] to i32 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP18]] to i32 +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP19]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i8 // CHECK1-NEXT: store i8 [[CONV4]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done5: // CHECK1-NEXT: ret void @@ -904,38 +904,38 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = 
load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP4]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP3]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 // CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 1 -// CHECK1-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[TMP2]], i64 0, i64 2 -// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = ptrtoint ptr [[TMP17]] to i64 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[TMP15]], i64 [[TMP18]] -// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP15]], [[TMP21]] +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint ptr [[TMP13]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP11]], i64 [[TMP14]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq ptr [[TMP11]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE5:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] // CHECK1: omp.arraycpy.body: -// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP15]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] -// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP22]] to i32 -// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1 -// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP23]] to i32 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi ptr [ [[TMP9]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi ptr [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// 
CHECK1-NEXT: [[CONV:%.*]] = sext i8 [[TMP18]] to i32 +// CHECK1-NEXT: [[TMP19:%.*]] = load i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 1, !noundef [[NOUNDEF3]] +// CHECK1-NEXT: [[CONV2:%.*]] = sext i8 [[TMP19]] to i32 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[CONV2]] // CHECK1-NEXT: [[CONV4:%.*]] = trunc i32 [[ADD3]] to i8 // CHECK1-NEXT: store i8 [[CONV4]], ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i8, ptr [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 -// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP21]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq ptr [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP17]] // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_BODY]] // CHECK1: omp.arraycpy.done5: // CHECK1-NEXT: ret void diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_simd_collapse_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_simd_collapse_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_parallel_for_simd_collapse_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_parallel_for_simd_collapse_codegen.cpp @@ -129,34 +129,34 @@ // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[A]], ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[A]], ptr [[TMP1]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 56088, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 56088, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK1-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28(ptr [[THIS1]]) #[[ATTR2:[0-9]+]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -164,8 +164,8 @@ // CHECK1-NEXT: [[A3:%.*]] = getelementptr 
inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A3]], i64 0, i64 0 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4 -// CHECK1-NEXT: ret i32 [[TMP18]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF4:![0-9]+]] +// CHECK1-NEXT: ret i32 [[TMP16]] // // // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28 @@ -173,7 +173,7 @@ // CHECK1-NEXT: entry: // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined., ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -196,51 +196,51 @@ // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 // CHECK1-NEXT: store i32 56087, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 56087 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 56087, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, 
!llvm.access.group [[ACC_GRP5:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group !4 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -273,74 +273,74 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: [[TMP2:%.*]] = load 
i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV2]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 56087 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 56087, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 456 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef 
[[NOUNDEF4]] // CHECK1-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP12]], 456 // CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 456 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL5]] // CHECK1-NEXT: [[MUL6:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]] -// CHECK1-NEXT: store i32 [[ADD7]], ptr [[J]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: store i32 [[ADD7]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP9]] // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP14]] to i64 // CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM8]] -// CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX9]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP9]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK1-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -380,34 +380,34 @@ // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr [[A]], ptr [[TMP2]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP6:%.*]] = 
getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr [[A]], ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 56088, ptr [[TMP15]], align 8 -// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], 
i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 56088, ptr [[TMP13]], align 8 +// CHECK3-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK3-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28(ptr [[THIS1]]) #[[ATTR2:[0-9]+]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -415,8 +415,8 @@ // CHECK3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A3]], i32 0, i32 0 // CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4 -// CHECK3-NEXT: ret i32 [[TMP18]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF5:![0-9]+]] +// CHECK3-NEXT: ret i32 [[TMP16]] // // // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28 @@ -424,7 +424,7 @@ // CHECK3-NEXT: entry: // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @.omp_outlined., ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -447,49 +447,49 @@ // CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4 // CHECK3-NEXT: store i32 56087, ptr [[DOTOMP_COMB_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 56087 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 56087, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group !5 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..1, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP6]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -522,70 +522,70 @@ // CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4, !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], 
i32 1, i32 1) -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 56087 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 56087, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 456 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP12]], 456 // CHECK3-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL4]] // CHECK3-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK3-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], 
i32 0, i32 [[TMP13]] -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP14]] -// CHECK3-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK3-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -626,51 +626,51 @@ // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 456 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: 
[[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP5]], 456 // CHECK5-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456 // CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL4]] // CHECK5-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK5-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP7]] to i64 // CHECK5-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM7]] -// CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK5-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 123, ptr [[I]], align 4 // CHECK5-NEXT: store i32 456, ptr [[J]], align 4 // CHECK5-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK5-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A10]], i64 0, i64 0 // CHECK5-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX11]], i64 0, i64 0 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX12]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX12]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: ret i32 [[TMP9]] // // @@ -697,49 +697,49 @@ // CHECK7-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr 
[[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 456 // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP5]], 456 // CHECK7-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456 // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL4]] // CHECK7-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK7-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], i32 0, i32 [[TMP6]] -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP7]] -// CHECK7-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, 
!llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 123, ptr [[I]], align 4 // CHECK7-NEXT: store i32 456, ptr [[J]], align 4 // CHECK7-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK7-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A9]], i32 0, i32 0 // CHECK7-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX10]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX11]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX11]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: ret i32 [[TMP9]] // // @@ -761,18 +761,18 @@ // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x ptr], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK9-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK9-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 // CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() // CHECK9-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 @@ -780,100 +780,100 @@ // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 // CHECK9-NEXT: store i64 [[TMP3]], ptr [[__VLA_EXPR1]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[N_CASTED]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[M]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP8]], ptr [[M_CASTED]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[M_CASTED]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[M_CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] 
// CHECK9-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4 // CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes, i64 40, i1 false) -// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: store i64 [[TMP7]], ptr [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: store i64 [[TMP7]], ptr [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store i64 [[TMP7]], ptr [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store ptr null, ptr [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP16]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 // CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP18]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP20]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP19]], align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP22]], align 8 +// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK9-NEXT: store ptr null, ptr [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds 
[5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP24]], align 8 +// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP25]], align 8 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 +// CHECK9-NEXT: store i64 [[TMP11]], ptr [[TMP26]], align 8 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 // CHECK9-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP30]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP35]], align 8 -// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK9-NEXT: store i64 [[TMP11]], ptr [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP38]], align 8 -// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP42:%.*]] = load i32, ptr [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP42]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP43:%.*]] = load i32, ptr [[M]], align 4 -// CHECK9-NEXT: store i32 [[TMP43]], ptr [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP44]], 0 +// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP31:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP32:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: store i32 [[TMP32]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP33]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 -// CHECK9-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP45]], 0 -// CHECK9-NEXT: [[DIV7:%.*]] = 
sdiv i32 [[SUB6]], 1 -// CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] -// CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 -// CHECK9-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[TMP46:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP46]], 1 +// CHECK9-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 +// CHECK9-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP34]], 0 +// CHECK9-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 +// CHECK9-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] +// CHECK9-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 +// CHECK9-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK9-NEXT: [[TMP35:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP35]], 1 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 1, ptr [[TMP47]], align 4 -// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 5, ptr [[TMP48]], align 4 -// CHECK9-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP39]], ptr [[TMP49]], align 8 -// CHECK9-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP40]], ptr [[TMP50]], align 8 -// CHECK9-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr [[TMP41]], ptr [[TMP51]], align 8 -// CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP52]], align 8 -// CHECK9-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP53]], align 8 -// CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP54]], align 8 -// CHECK9-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 [[ADD]], ptr [[TMP55]], align 8 -// CHECK9-NEXT: [[TMP56:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP57:%.*]] = icmp ne i32 [[TMP56]], 0 -// CHECK9-NEXT: br i1 [[TMP57]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 1, ptr [[TMP36]], align 4 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 5, ptr [[TMP37]], align 4 +// CHECK9-NEXT: 
[[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr [[TMP28]], ptr [[TMP38]], align 8 +// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: store ptr [[TMP29]], ptr [[TMP39]], align 8 +// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr [[TMP30]], ptr [[TMP40]], align 8 +// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP41]], align 8 +// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP42]], align 8 +// CHECK9-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP43]], align 8 +// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 [[ADD]], ptr [[TMP44]], align 8 +// CHECK9-NEXT: [[TMP45:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP46:%.*]] = icmp ne i32 [[TMP45]], 0 +// CHECK9-NEXT: br i1 [[TMP46]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], ptr [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP58:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 -// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP58]]) +// CHECK9-NEXT: [[TMP47:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP47]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP59:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP59]]) -// CHECK9-NEXT: [[TMP60:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP60]] +// CHECK9-NEXT: [[TMP48:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP48]]) +// CHECK9-NEXT: [[TMP49:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP49]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86 @@ -889,8 +889,8 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 5, ptr @.omp_outlined., ptr [[N_ADDR]], ptr [[M_ADDR]], i64 [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]) // CHECK9-NEXT: ret void @@ -929,18 +929,18 @@ // CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[M_ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP8]], 0 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 @@ -949,71 +949,71 @@ // CHECK9-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_5]], align 8 // CHECK9-NEXT: store i32 0, ptr [[I]], align 4 // CHECK9-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]] // CHECK9-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK9: land.lhs.true: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP10:%.*]] = icmp slt i32 0, [[TMP10]] // CHECK9-NEXT: br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] // CHECK9: omp.precond.then: // CHECK9-NEXT: store i64 0, ptr [[DOTOMP_COMB_LB]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i64 [[TMP11]], ptr [[DOTOMP_COMB_UB]], align 8 // CHECK9-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1:[0-9]+]], i32 [[TMP13]], i32 92, ptr [[DOTOMP_IS_LAST]], 
ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK9-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP13:%.*]] = icmp sgt i64 [[TMP14]], [[TMP15]] // CHECK9-NEXT: br i1 [[CMP13]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: -// CHECK9-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i64 [ [[TMP16]], [[COND_TRUE]] ], [ [[TMP17]], [[COND_FALSE]] ] // CHECK9-NEXT: store i64 [[COND]], ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i64 [[TMP18]], ptr [[DOTOMP_IV]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group !5 +// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP14:%.*]] = icmp sle i64 [[TMP19]], [[TMP20]] // CHECK9-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 7, ptr @.omp_outlined..1, i64 [[TMP21]], i64 [[TMP22]], ptr [[TMP0]], ptr [[TMP1]], i64 [[TMP2]], i64 [[TMP3]], ptr [[TMP4]]), !llvm.access.group !5 +// CHECK9-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 7, ptr @.omp_outlined..1, i64 [[TMP21]], i64 [[TMP22]], ptr [[TMP0]], ptr [[TMP1]], i64 [[TMP2]], i64 [[TMP3]], ptr [[TMP4]]), !llvm.access.group [[ACC_GRP6]] // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8, !llvm.access.group !5 +// CHECK9-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP23]], [[TMP24]] -// CHECK9-NEXT: store i64 [[ADD]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK9-NEXT: store i64 [[ADD]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK9-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK9-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP29]], 0 // CHECK9-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1 // CHECK9-NEXT: [[MUL17:%.*]] = mul nsw i32 [[DIV16]], 1 // CHECK9-NEXT: [[ADD18:%.*]] = add nsw i32 0, [[MUL17]] // CHECK9-NEXT: store i32 [[ADD18]], ptr [[I11]], align 4 -// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP30]], 0 // CHECK9-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1 // CHECK9-NEXT: [[MUL21:%.*]] = mul nsw i32 [[DIV20]], 1 @@ -1063,18 +1063,18 @@ // CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[M_ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] 
= load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP8]], 0 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 @@ -1083,50 +1083,50 @@ // CHECK9-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_5]], align 8 // CHECK9-NEXT: store i32 0, ptr [[I]], align 4 // CHECK9-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]] // CHECK9-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK9: land.lhs.true: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP10:%.*]] = icmp slt i32 0, [[TMP10]] // CHECK9-NEXT: br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] // CHECK9: omp.precond.then: // CHECK9-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i64 [[TMP11]], ptr [[DOTOMP_UB]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 -// CHECK9-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i64 [[TMP12]], ptr [[DOTOMP_LB]], align 8 // CHECK9-NEXT: store i64 [[TMP13]], ptr [[DOTOMP_UB]], align 8 // CHECK9-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB2:[0-9]+]], i32 [[TMP15]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK9-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP13:%.*]] = icmp sgt i64 [[TMP16]], [[TMP17]] // CHECK9-NEXT: br i1 [[CMP13]], 
label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: -// CHECK9-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i64 [ [[TMP18]], [[COND_TRUE]] ], [ [[TMP19]], [[COND_FALSE]] ] // CHECK9-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i64 [[TMP20]], ptr [[DOTOMP_IV]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP14:%.*]] = icmp sle i64 [[TMP21]], [[TMP22]] // CHECK9-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK9-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1 // CHECK9-NEXT: [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]] @@ -1135,16 +1135,16 @@ // CHECK9-NEXT: [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL20]] // CHECK9-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK9-NEXT: store i32 [[CONV21]], ptr [[I11]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: store i32 [[CONV21]], ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB22:%.*]] = sub nsw i32 [[TMP27]], 0 // CHECK9-NEXT: [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1 // CHECK9-NEXT: [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]] // CHECK9-NEXT: [[CONV25:%.*]] = sext i32 [[MUL24]] to i64 // 
CHECK9-NEXT: [[DIV26:%.*]] = sdiv i64 [[TMP26]], [[CONV25]] -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP28]], 0 // CHECK9-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 // CHECK9-NEXT: [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]] @@ -1154,40 +1154,40 @@ // CHECK9-NEXT: [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1 // CHECK9-NEXT: [[ADD34:%.*]] = add nsw i64 0, [[MUL33]] // CHECK9-NEXT: [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32 -// CHECK9-NEXT: store i32 [[CONV35]], ptr [[J12]], align 4, !llvm.access.group !9 -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[I11]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: store i32 [[CONV35]], ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP29]] to i64 // CHECK9-NEXT: [[TMP30:%.*]] = mul nsw i64 [[IDXPROM]], [[TMP3]] // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 [[TMP30]] -// CHECK9-NEXT: [[TMP31:%.*]] = load i32, ptr [[J12]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP31:%.*]] = load i32, ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM36:%.*]] = sext i32 [[TMP31]] to i64 // CHECK9-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i64 [[IDXPROM36]] -// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX37]], align 4, !llvm.access.group !9 +// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX37]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !9 +// CHECK9-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[ADD38:%.*]] = add nsw i64 [[TMP32]], 1 -// CHECK9-NEXT: store i64 [[ADD38]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !9 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK9-NEXT: store i64 [[ADD38]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP10]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4 +// CHECK9-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP34]]) -// CHECK9-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 // CHECK9-NEXT: br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: -// CHECK9-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // 
CHECK9-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP37]], 0 // CHECK9-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 // CHECK9-NEXT: [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1 // CHECK9-NEXT: [[ADD42:%.*]] = add nsw i32 0, [[MUL41]] // CHECK9-NEXT: store i32 [[ADD42]], ptr [[I11]], align 4 -// CHECK9-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK9-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP38]], 0 // CHECK9-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1 // CHECK9-NEXT: [[MUL45:%.*]] = mul nsw i32 [[DIV44]], 1 @@ -1213,34 +1213,34 @@ // CHECK9-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: store ptr [[A]], ptr [[TMP0]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[A]], ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: store ptr [[A]], ptr [[TMP1]], align 8 +// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr @.offload_sizes.4, ptr [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP12]], align 8 -// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP14]], align 8 -// CHECK9-NEXT: [[TMP15:%.*]] = 
getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 20, ptr [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l72.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK9-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8 +// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr @.offload_sizes.4, ptr [[TMP9]], align 8 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP11]], align 8 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 20, ptr [[TMP13]], align 8 +// CHECK9-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l72.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK9-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l72(ptr [[A]]) #[[ATTR3]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1282,45 +1282,45 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 19 // CHECK9-NEXT: br i1 [[CMP]], 
label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 19, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !14 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..3, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group !14 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..3, i64 [[TMP9]], i64 [[TMP11]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP15]] // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !14 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -1356,70 +1356,70 @@ // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 19, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 [[CONV2]], ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 19 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // 
CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 19, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK9-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 2 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP12]], 2 // CHECK9-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 2 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL5]] // CHECK9-NEXT: [[MUL6:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK9-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]] -// CHECK9-NEXT: store i32 [[ADD7]], ptr [[J]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: store i32 [[ADD7]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[TMP0]], i64 0, i64 [[IDXPROM]] -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP14]] to i64 // CHECK9-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM8]] -// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX9]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX9]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: 
omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK9-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK9-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -1464,109 +1464,109 @@ // CHECK11-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6:![0-9]+]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 // CHECK11-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR1]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[M]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[M_CASTED]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[M_CASTED]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[M_CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[TMP9:%.*]] = mul nuw i32 [[TMP8]], 4 // CHECK11-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64 // CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes, i32 40, i1 false) -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 [[TMP5]], ptr [[TMP11]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x ptr], ptr 
[[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: store i32 [[TMP5]], ptr [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 [[TMP5]], ptr [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store ptr null, ptr [[TMP13]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 // CHECK11-NEXT: store ptr null, ptr [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store ptr null, ptr [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP24]], align 4 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP18]], align 4 +// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP21]], align 4 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP23]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP24]], align 4 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 +// CHECK11-NEXT: store i64 [[TMP10]], ptr [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr 
inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 // CHECK11-NEXT: store ptr null, ptr [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP29]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr null, ptr [[TMP31]], align 4 -// CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP32]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP34]], align 4 -// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK11-NEXT: store i64 [[TMP10]], ptr [[TMP36]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr null, ptr [[TMP37]], align 4 -// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP41:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP41]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP42:%.*]] = load i32, ptr [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP42]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP43]], 0 +// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: store i32 [[TMP30]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP32]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP44]], 0 +// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP33]], 0 // CHECK11-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK11-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK11-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: 
[[TMP45:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP45]], 1 +// CHECK11-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP34]], 1 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 1, ptr [[TMP46]], align 4 -// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 5, ptr [[TMP47]], align 4 -// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK11-NEXT: store ptr [[TMP38]], ptr [[TMP48]], align 4 -// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr [[TMP39]], ptr [[TMP49]], align 4 -// CHECK11-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr [[TMP40]], ptr [[TMP50]], align 4 -// CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP51]], align 4 -// CHECK11-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP52]], align 4 -// CHECK11-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP53]], align 4 -// CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 [[ADD]], ptr [[TMP54]], align 8 -// CHECK11-NEXT: [[TMP55:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP56:%.*]] = icmp ne i32 [[TMP55]], 0 -// CHECK11-NEXT: br i1 [[TMP56]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 1, ptr [[TMP35]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 5, ptr [[TMP36]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr [[TMP27]], ptr [[TMP37]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr [[TMP28]], ptr [[TMP38]], align 4 +// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr [[TMP29]], ptr [[TMP39]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP40]], align 4 +// CHECK11-NEXT: [[TMP41:%.*]] = 
getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP41]], align 4 +// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP42]], align 4 +// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 [[ADD]], ptr [[TMP43]], align 8 +// CHECK11-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK11-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], ptr [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP57:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 -// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP57]]) +// CHECK11-NEXT: [[TMP46:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP46]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP58:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP58]]) -// CHECK11-NEXT: [[TMP59:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP59]] +// CHECK11-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK11-NEXT: [[TMP48:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP48]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86 @@ -1582,8 +1582,8 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4 // CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB3]], i32 5, ptr @.omp_outlined., ptr [[N_ADDR]], ptr [[M_ADDR]], i32 [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]) // CHECK11-NEXT: ret void @@ -1622,18 +1622,18 @@ // CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[M_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP4:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP8]], 0 // CHECK11-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK11-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 @@ -1642,73 +1642,73 @@ // CHECK11-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_5]], align 8 // CHECK11-NEXT: store i32 0, ptr [[I]], align 4 // CHECK11-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]] // CHECK11-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK11: land.lhs.true: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP10:%.*]] = icmp slt i32 0, [[TMP10]] // CHECK11-NEXT: br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] // CHECK11: omp.precond.then: // CHECK11-NEXT: store i64 0, ptr [[DOTOMP_COMB_LB]], align 8 -// CHECK11-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i64 [[TMP11]], ptr [[DOTOMP_COMB_UB]], align 8 // CHECK11-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_8(ptr 
@[[GLOB1:[0-9]+]], i32 [[TMP13]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK11-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP13:%.*]] = icmp sgt i64 [[TMP14]], [[TMP15]] // CHECK11-NEXT: br i1 [[CMP13]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: -// CHECK11-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8 +// CHECK11-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i64 [ [[TMP16]], [[COND_TRUE]] ], [ [[TMP17]], [[COND_FALSE]] ] // CHECK11-NEXT: store i64 [[COND]], ptr [[DOTOMP_COMB_UB]], align 8 -// CHECK11-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8 +// CHECK11-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i64 [[TMP18]], ptr [[DOTOMP_IV]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP14:%.*]] = icmp sle i64 [[TMP19]], [[TMP20]] // CHECK11-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8, !llvm.access.group [[ACC_GRP7]] // CHECK11-NEXT: [[TMP22:%.*]] = trunc i64 [[TMP21]] to i32 -// CHECK11-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8, !llvm.access.group [[ACC_GRP7]] // CHECK11-NEXT: [[TMP24:%.*]] = trunc i64 [[TMP23]] to i32 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 7, ptr @.omp_outlined..1, i32 [[TMP22]], i32 [[TMP24]], ptr [[TMP0]], ptr [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], ptr [[TMP4]]), !llvm.access.group !6 +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 7, ptr @.omp_outlined..1, i32 [[TMP22]], i32 [[TMP24]], ptr [[TMP0]], ptr [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], ptr [[TMP4]]), !llvm.access.group [[ACC_GRP7]] // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP25]], [[TMP26]] -// CHECK11-NEXT: store i64 [[ADD]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK11-NEXT: store i64 [[ADD]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP28]]) -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK11-NEXT: br i1 [[TMP30]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: -// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP31]], 0 // CHECK11-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1 // CHECK11-NEXT: [[MUL17:%.*]] = mul nsw i32 [[DIV16]], 1 // CHECK11-NEXT: [[ADD18:%.*]] = add nsw i32 0, [[MUL17]] // CHECK11-NEXT: store i32 [[ADD18]], ptr [[I11]], align 4 -// CHECK11-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP32]], 0 // CHECK11-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1 // CHECK11-NEXT: [[MUL21:%.*]] = mul nsw i32 [[DIV20]], 1 @@ -1758,18 +1758,18 @@ // CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[M_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP4:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP5]], ptr 
[[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP8]], 0 // CHECK11-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK11-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 @@ -1778,52 +1778,52 @@ // CHECK11-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_5]], align 8 // CHECK11-NEXT: store i32 0, ptr [[I]], align 4 // CHECK11-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]] // CHECK11-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK11: land.lhs.true: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP10:%.*]] = icmp slt i32 0, [[TMP10]] // CHECK11-NEXT: br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] // CHECK11: omp.precond.then: // CHECK11-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK11-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i64 [[TMP11]], ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CONV11:%.*]] = zext i32 [[TMP12]] to i64 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CONV12:%.*]] = zext i32 [[TMP13]] to i64 // CHECK11-NEXT: store i64 [[CONV11]], ptr [[DOTOMP_LB]], align 8 // CHECK11-NEXT: store i64 [[CONV12]], ptr [[DOTOMP_UB]], align 8 // CHECK11-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP14:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB2:[0-9]+]], i32 [[TMP15]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK11-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_UB]], align 
8, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP15:%.*]] = icmp sgt i64 [[TMP16]], [[TMP17]] // CHECK11-NEXT: br i1 [[CMP15]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: -// CHECK11-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i64 [ [[TMP18]], [[COND_TRUE]] ], [ [[TMP19]], [[COND_FALSE]] ] // CHECK11-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK11-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i64 [[TMP20]], ptr [[DOTOMP_IV]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP22:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP16:%.*]] = icmp sle i64 [[TMP21]], [[TMP22]] // CHECK11-NEXT: br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB17:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK11-NEXT: [[DIV18:%.*]] = sdiv i32 [[SUB17]], 1 // CHECK11-NEXT: [[MUL19:%.*]] = mul nsw i32 1, [[DIV18]] @@ -1832,16 +1832,16 @@ // CHECK11-NEXT: [[MUL22:%.*]] = mul nsw i64 [[DIV21]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL22]] // CHECK11-NEXT: [[CONV23:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK11-NEXT: store i32 [[CONV23]], ptr [[I13]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: store i32 [[CONV23]], ptr [[I13]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, 
!llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP27]], 0 // CHECK11-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 // CHECK11-NEXT: [[MUL26:%.*]] = mul nsw i32 1, [[DIV25]] // CHECK11-NEXT: [[CONV27:%.*]] = sext i32 [[MUL26]] to i64 // CHECK11-NEXT: [[DIV28:%.*]] = sdiv i64 [[TMP26]], [[CONV27]] -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP28]], 0 // CHECK11-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 // CHECK11-NEXT: [[MUL31:%.*]] = mul nsw i32 1, [[DIV30]] @@ -1851,38 +1851,38 @@ // CHECK11-NEXT: [[MUL35:%.*]] = mul nsw i64 [[SUB34]], 1 // CHECK11-NEXT: [[ADD36:%.*]] = add nsw i64 0, [[MUL35]] // CHECK11-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 -// CHECK11-NEXT: store i32 [[CONV37]], ptr [[J14]], align 4, !llvm.access.group !10 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[I13]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: store i32 [[CONV37]], ptr [[J14]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[I13]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP30:%.*]] = mul nsw i32 [[TMP29]], [[TMP3]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 [[TMP30]] -// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[J14]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[J14]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX38:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i32 [[TMP31]] -// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX38]], align 4, !llvm.access.group !10 +// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX38]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !10 +// CHECK11-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ADD39:%.*]] = add nsw i64 [[TMP32]], 1 -// CHECK11-NEXT: store i64 [[ADD39]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !10 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK11-NEXT: store i64 [[ADD39]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP11]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: [[TMP33:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4 +// CHECK11-NEXT: [[TMP34:%.*]] = load i32, ptr [[TMP33]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP34]]) -// CHECK11-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 // CHECK11-NEXT: br i1 [[TMP36]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // 
CHECK11: .omp.final.then: -// CHECK11-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP37]], 0 // CHECK11-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 // CHECK11-NEXT: [[MUL42:%.*]] = mul nsw i32 [[DIV41]], 1 // CHECK11-NEXT: [[ADD43:%.*]] = add nsw i32 0, [[MUL42]] // CHECK11-NEXT: store i32 [[ADD43]], ptr [[I13]], align 4 -// CHECK11-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP38]], 0 // CHECK11-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1 // CHECK11-NEXT: [[MUL46:%.*]] = mul nsw i32 [[DIV45]], 1 @@ -1908,34 +1908,34 @@ // CHECK11-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: store ptr [[A]], ptr [[TMP0]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: store ptr [[A]], ptr [[TMP2]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: store ptr [[A]], ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK11-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr @.offload_sizes.4, ptr [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK11-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, 
ptr [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 20, ptr [[TMP15]], align 8 -// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l72.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK11-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr @.offload_sizes.4, ptr [[TMP9]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: store ptr @.offload_maptypes.5, ptr [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP11]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 20, ptr [[TMP13]], align 8 +// CHECK11-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l72.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK11-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l72(ptr [[A]]) #[[ATTR3]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1977,43 +1977,43 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], 
i32 1, i32 1) -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 19 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 19, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..3, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group !15 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 3, ptr @.omp_outlined..3, i32 [[TMP8]], i32 [[TMP9]], ptr [[TMP0]]), !llvm.access.group [[ACC_GRP16]] // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !15 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK11-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -2049,66 +2049,66 @@ // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 19, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4 // CHECK11-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 19 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 19, 
[[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 2 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP12]], 2 // CHECK11-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 2 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL4]] // CHECK11-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK11-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[TMP0]], i32 0, i32 [[TMP13]] -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP14]] -// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], 1 -// CHECK11-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK11-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -2154,9 +2154,9 @@ // CHECK13-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 // CHECK13-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 // CHECK13-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() // CHECK13-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 @@ -2164,15 +2164,15 @@ // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 // CHECK13-NEXT: store i64 [[TMP3]], ptr [[__VLA_EXPR1]], align 8 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4 +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[M]], align 4 +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP8]], 0 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK13-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP9]], 0 // CHECK13-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK13-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 @@ -2180,29 +2180,29 @@ // CHECK13-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK13-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 // CHECK13-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK13-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK13-NEXT: [[TMP10:%.*]] = load i64, ptr 
[[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i64 [[TMP10]], ptr [[DOTOMP_UB]], align 8 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4 // CHECK13-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP11]] // CHECK13-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[SIMD_IF_END:%.*]] // CHECK13: land.lhs.true: -// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP8:%.*]] = icmp slt i32 0, [[TMP12]] // CHECK13-NEXT: br i1 [[CMP8]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END]] // CHECK13: simd.if.then: -// CHECK13-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK13-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i64 [[TMP13]], ptr [[DOTOMP_IV]], align 8 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP11:%.*]] = icmp sle i64 [[TMP14]], [[TMP15]] // CHECK13-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP17]], 0 // CHECK13-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 // CHECK13-NEXT: [[MUL14:%.*]] = mul nsw i32 1, [[DIV13]] @@ -2211,16 +2211,16 @@ // CHECK13-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL17]] // CHECK13-NEXT: [[CONV18:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK13-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef 
[[NOUNDEF2]] // CHECK13-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP20]], 0 // CHECK13-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1 // CHECK13-NEXT: [[MUL21:%.*]] = mul nsw i32 1, [[DIV20]] // CHECK13-NEXT: [[CONV22:%.*]] = sext i32 [[MUL21]] to i64 // CHECK13-NEXT: [[DIV23:%.*]] = sdiv i64 [[TMP19]], [[CONV22]] -// CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP21]], 0 // CHECK13-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 // CHECK13-NEXT: [[MUL26:%.*]] = mul nsw i32 1, [[DIV25]] @@ -2230,31 +2230,31 @@ // CHECK13-NEXT: [[MUL30:%.*]] = mul nsw i64 [[SUB29]], 1 // CHECK13-NEXT: [[ADD31:%.*]] = add nsw i64 0, [[MUL30]] // CHECK13-NEXT: [[CONV32:%.*]] = trunc i64 [[ADD31]] to i32 -// CHECK13-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64 // CHECK13-NEXT: [[TMP23:%.*]] = mul nsw i64 [[IDXPROM]], [[TMP3]] // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP23]] -// CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM33:%.*]] = sext i32 [[TMP24]] to i64 // CHECK13-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i64 [[IDXPROM33]] -// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX34]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX34]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD35:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK13-NEXT: store i64 [[ADD35]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK13-NEXT: store i64 [[ADD35]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK13: omp.inner.for.end: -// CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB36:%.*]] = sub nsw i32 [[TMP26]], 0 // CHECK13-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1 // CHECK13-NEXT: [[MUL38:%.*]] = mul nsw i32 [[DIV37]], 1 // CHECK13-NEXT: [[ADD39:%.*]] = add nsw i32 0, [[MUL38]] // CHECK13-NEXT: store i32 [[ADD39]], ptr [[I9]], align 4 -// CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // 
CHECK13-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP27]], 0 // CHECK13-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 // CHECK13-NEXT: [[MUL42:%.*]] = mul nsw i32 [[DIV41]], 1 @@ -2262,7 +2262,7 @@ // CHECK13-NEXT: store i32 [[ADD43]], ptr [[J10]], align 4 // CHECK13-NEXT: br label [[SIMD_IF_END]] // CHECK13: simd.if.end: -// CHECK13-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK13-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP28]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP29:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 @@ -2286,43 +2286,43 @@ // CHECK13-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 19, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 2 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP5]], 2 // CHECK13-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 2 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL3]] // CHECK13-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK13-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL4]] -// CHECK13-NEXT: store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM:%.*]] 
= sext i32 [[TMP6]] to i64 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM6]] -// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 10, ptr [[I]], align 4 // CHECK13-NEXT: store i32 2, ptr [[J]], align 4 @@ -2357,23 +2357,23 @@ // CHECK15-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK15-NEXT: store i32 100, ptr [[N]], align 4 // CHECK15-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF3:![0-9]+]] +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK15-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 // CHECK15-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR1]], align 4 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4 +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[M]], align 4 +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0 // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK15-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP7]], 0 // CHECK15-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // 
CHECK15-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 @@ -2381,29 +2381,29 @@ // CHECK15-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK15-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 // CHECK15-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK15-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK15-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_UB]], align 8 // CHECK15-NEXT: store i32 0, ptr [[I]], align 4 // CHECK15-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]] // CHECK15-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[SIMD_IF_END:%.*]] // CHECK15: land.lhs.true: -// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP8:%.*]] = icmp slt i32 0, [[TMP10]] // CHECK15-NEXT: br i1 [[CMP8]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END]] // CHECK15: simd.if.then: -// CHECK15-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK15-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i64 [[TMP11]], ptr [[DOTOMP_IV]], align 8 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP11:%.*]] = icmp sle i64 [[TMP12]], [[TMP13]] // CHECK15-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP15]], 0 // CHECK15-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 // CHECK15-NEXT: [[MUL14:%.*]] = mul nsw i32 1, [[DIV13]] @@ -2412,16 +2412,16 @@ // CHECK15-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL17]] // CHECK15-NEXT: [[CONV18:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK15-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP4]] 
+// CHECK15-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP18]], 0 // CHECK15-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1 // CHECK15-NEXT: [[MUL21:%.*]] = mul nsw i32 1, [[DIV20]] // CHECK15-NEXT: [[CONV22:%.*]] = sext i32 [[MUL21]] to i64 // CHECK15-NEXT: [[DIV23:%.*]] = sdiv i64 [[TMP17]], [[CONV22]] -// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP19]], 0 // CHECK15-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 // CHECK15-NEXT: [[MUL26:%.*]] = mul nsw i32 1, [[DIV25]] @@ -2431,29 +2431,29 @@ // CHECK15-NEXT: [[MUL30:%.*]] = mul nsw i64 [[SUB29]], 1 // CHECK15-NEXT: [[ADD31:%.*]] = add nsw i64 0, [[MUL30]] // CHECK15-NEXT: [[CONV32:%.*]] = trunc i64 [[ADD31]] to i32 -// CHECK15-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP21:%.*]] = mul nsw i32 [[TMP20]], [[TMP1]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP21]] -// CHECK15-NEXT: [[TMP22:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP22:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i32 [[TMP22]] -// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX33]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX33]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD34:%.*]] = add nsw i64 [[TMP23]], 1 -// CHECK15-NEXT: store i64 [[ADD34]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK15-NEXT: store i64 [[ADD34]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK15: omp.inner.for.end: -// CHECK15-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB35:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK15-NEXT: [[DIV36:%.*]] = sdiv i32 [[SUB35]], 1 // CHECK15-NEXT: [[MUL37:%.*]] = mul nsw i32 [[DIV36]], 1 // CHECK15-NEXT: [[ADD38:%.*]] = add nsw i32 0, [[MUL37]] // 
CHECK15-NEXT: store i32 [[ADD38]], ptr [[I9]], align 4 -// CHECK15-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK15-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP25]], 0 // CHECK15-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 // CHECK15-NEXT: [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1 @@ -2461,7 +2461,7 @@ // CHECK15-NEXT: store i32 [[ADD42]], ptr [[J10]], align 4 // CHECK15-NEXT: br label [[SIMD_IF_END]] // CHECK15: simd.if.end: -// CHECK15-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK15-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP26]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP27:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 @@ -2485,41 +2485,41 @@ // CHECK15-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 19, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 2 // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP5]], 2 // CHECK15-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 2 // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL3]] // CHECK15-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK15-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL4]] -// CHECK15-NEXT: store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: 
store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[A]], i32 0, i32 [[TMP6]] -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP7]] -// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX6]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK15-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 10, ptr [[I]], align 4 // CHECK15-NEXT: store i32 2, ptr [[J]], align 4 diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_simd_proc_bind_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_simd_proc_bind_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_parallel_for_simd_proc_bind_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_parallel_for_simd_proc_bind_codegen.cpp @@ -141,46 +141,46 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6:![0-9]+]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, 
!noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 4), !llvm.access.group !6 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !6 +// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 4), !llvm.access.group [[ACC_GRP7]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !6 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..1, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP7]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !6 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -210,54 +210,54 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store 
i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: 
-// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -293,46 +293,46 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 
[[TMP1]], i32 3), !llvm.access.group !15 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !15 +// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 3), !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..3, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !15 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..3, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP16]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !15 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -362,54 +362,54 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: 
[[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !18 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !18 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr 
[[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -478,46 +478,46 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !21 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 2), !llvm.access.group !21 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !21 +// CHECK1-NEXT: call void @__kmpc_push_proc_bind(ptr @[[GLOB3]], i32 [[TMP1]], i32 2), !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP22]] // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !21 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP22]] // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..5, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !21 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 2, ptr @.omp_outlined..5, i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group [[ACC_GRP22]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !21 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP22]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !21 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]]) -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -547,54 +547,54 @@ // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP3]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 999 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP5:%.*]] = 
load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 999, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !24 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !24 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]], !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !24 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]]) -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -628,52 +628,52 @@ // CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label 
[[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: store i32 1000, ptr [[I]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB3]], align 4 // CHECK3-NEXT: store i32 999, ptr [[DOTOMP_UB4]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB3]], align 4, !noundef [[NOUNDEF2]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV5]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] // CHECK3: omp.inner.for.cond7: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB4]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK3: omp.inner.for.body9: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK3-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] -// CHECK3-NEXT: store i32 [[ADD11]], ptr [[I6]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: store i32 
[[ADD11]], ptr [[I6]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] // CHECK3: omp.body.continue12: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK3: omp.inner.for.inc13: -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 -// CHECK3-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV5]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK3: omp.inner.for.end15: // CHECK3-NEXT: store i32 1000, ptr [[I6]], align 4 // CHECK3-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v() @@ -690,27 +690,27 @@ // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 999, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF2]] // CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 -// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: store i32 1000, ptr [[I]], align 4 // CHECK3-NEXT: ret i32 0 diff --git 
a/clang/test/OpenMP/teams_distribute_parallel_for_simd_reduction_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_simd_reduction_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_parallel_for_simd_reduction_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_parallel_for_simd_reduction_codegen.cpp @@ -107,39 +107,39 @@ // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[SIVAR_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[SIVAR_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[SIVAR_CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8 -// 
CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 2, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l70.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK1-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 2, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l70.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l70(i64 [[TMP1]]) #[[ATTR2:[0-9]+]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -182,45 +182,45 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// 
CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..1, i64 [[TMP9]], i64 [[TMP11]], ptr [[SIVAR1]]), !llvm.access.group !5 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..1, i64 [[TMP9]], i64 [[TMP11]], ptr [[SIVAR1]]), !llvm.access.group [[ACC_GRP6]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -229,21 +229,21 @@ // CHECK1: .omp.final.done: // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK1-NEXT: store ptr [[SIVAR1]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] // CHECK1-NEXT: store i32 [[ADD3]], ptr [[TMP0]], align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK1-NEXT: [[TMP23:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP22]] monotonic, align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], 
align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: ret void @@ -274,9 +274,9 @@ // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 @@ -284,49 +284,49 @@ // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store i32 0, ptr [[SIVAR2]], align 4 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !9 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: 
[[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !9 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !9 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[SIVAR2]], align 4, !llvm.access.group !9 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[SIVAR2]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], [[TMP11]] -// CHECK1-NEXT: store i32 [[ADD4]], ptr [[SIVAR2]], align 4, !llvm.access.group !9 +// CHECK1-NEXT: store i32 [[ADD4]], ptr [[SIVAR2]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !9 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -335,21 +335,21 @@ // CHECK1: .omp.final.done: // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK1-NEXT: store ptr [[SIVAR2]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[SIVAR2]], align 4 -// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP19:%.*]] 
= load i32, ptr [[SIVAR2]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] // CHECK1-NEXT: store i32 [[ADD6]], ptr [[TMP0]], align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[SIVAR2]], align 4 -// CHECK1-NEXT: [[TMP23:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP22]] monotonic, align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR2]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: ret void @@ -363,15 +363,15 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK1-NEXT: ret void // // @@ -383,15 +383,15 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr 
[[TMP7]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK1-NEXT: ret void // // @@ -407,41 +407,41 @@ // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store i32 0, ptr [[T_VAR]], align 4 // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i64 8, i1 false) -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK1-NEXT: store i32 [[TMP1]], ptr [[T_VAR_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[T_VAR_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP2]], ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP2]], ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[T_VAR_CASTED]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[T_VAR_CASTED]], align 8, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.7, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store 
ptr @.offload_maptypes.8, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 2, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0 -// CHECK1-NEXT: br i1 [[TMP20]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.7, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 2, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i64 [[TMP2]]) #[[ATTR2]] +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i64 [[TMP1]]) #[[ATTR2]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK1: omp_offload.cont: // CHECK1-NEXT: ret i32 0 @@ -481,45 +481,45 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: 
[[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !14 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !14 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..4, i64 [[TMP9]], i64 [[TMP11]], ptr [[T_VAR1]]), !llvm.access.group !14 +// CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..4, i64 [[TMP9]], i64 [[TMP11]], ptr [[T_VAR1]]), !llvm.access.group [[ACC_GRP15]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !14 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !14 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -528,21 +528,21 @@ // CHECK1: .omp.final.done: // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK1-NEXT: store ptr [[T_VAR1]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.6, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.6, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] // CHECK1-NEXT: store i32 [[ADD3]], ptr [[TMP0]], align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK1-NEXT: [[TMP23:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP22]] monotonic, align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR1]], align 
4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: ret void @@ -573,9 +573,9 @@ // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 8 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32 // CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 @@ -583,49 +583,49 @@ // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: store i32 0, ptr [[T_VAR2]], align 4 // CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[MUL:%.*]] = mul 
nsw i32 [[TMP10]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !17 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !17 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[T_VAR2]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], [[TMP11]] -// CHECK1-NEXT: store i32 [[ADD4]], ptr [[T_VAR2]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: store i32 [[ADD4]], ptr [[T_VAR2]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !17 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -634,21 +634,21 @@ // CHECK1: .omp.final.done: // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK1-NEXT: store ptr [[T_VAR2]], ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.5, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.5, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[T_VAR2]], align 4 -// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP19:%.*]] = load 
i32, ptr [[T_VAR2]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] // CHECK1-NEXT: store i32 [[ADD6]], ptr [[TMP0]], align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[T_VAR2]], align 4 -// CHECK1-NEXT: [[TMP23:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP22]] monotonic, align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR2]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: // CHECK1-NEXT: ret void @@ -662,15 +662,15 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK1-NEXT: ret void // // @@ -682,15 +682,15 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], 
align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK1-NEXT: ret void // // @@ -711,39 +711,39 @@ // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF6:![0-9]+]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[SIVAR_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIVAR_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIVAR_CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP2]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 2, ptr [[TMP17]], align 8 -// CHECK3-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l70.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK3-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 2, ptr [[TMP15]], align 8 +// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l70.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l70(i32 [[TMP1]]) #[[ATTR2:[0-9]+]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -786,43 +786,43 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr 
[[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..1, i32 [[TMP8]], i32 [[TMP9]], ptr [[SIVAR1]]), !llvm.access.group !6 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..1, i32 [[TMP8]], i32 [[TMP9]], ptr [[SIVAR1]]), !llvm.access.group [[ACC_GRP7]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -831,21 +831,21 @@ // CHECK3: .omp.final.done: // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[SIVAR1]], ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) -// CHECK3-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) +// CHECK3-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK3-NEXT: ] // CHECK3: .omp.reduction.case1: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] // CHECK3-NEXT: store i32 [[ADD3]], ptr [[TMP0]], align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.case2: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[SIVAR1]], 
align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP19:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.default: // CHECK3-NEXT: ret void @@ -876,57 +876,57 @@ // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: store i32 0, ptr [[SIVAR1]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !10 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 
0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !10 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !10 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group !10 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]] -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group !10 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !10 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK3-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -935,21 +935,21 @@ // CHECK3: .omp.final.done: // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[SIVAR1]], ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK3-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK3-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK3-NEXT: ] // CHECK3: .omp.reduction.case1: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: 
[[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] // CHECK3-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.case2: -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP22]] monotonic, align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.default: // CHECK3-NEXT: ret void @@ -963,15 +963,15 @@ // CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK3-NEXT: ret void // // @@ -983,15 +983,15 @@ // CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load 
i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK3-NEXT: ret void // // @@ -1007,41 +1007,41 @@ // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 0, ptr [[T_VAR]], align 4 // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i32 8, i1 false) -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[T_VAR_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[T_VAR_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[TMP3]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[TMP5]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: store i32 [[TMP0]], ptr [[T_VAR_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[T_VAR_CASTED]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP9]], ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes.7, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP15]], align 4 -// CHECK3-NEXT: 
[[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 2, ptr [[TMP18]], align 8 -// CHECK3-NEXT: [[TMP19:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0 -// CHECK3-NEXT: br i1 [[TMP20]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.7, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 2, ptr [[TMP15]], align 8 +// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB4]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i32 [[TMP2]]) #[[ATTR2]] +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i32 [[TMP1]]) #[[ATTR2]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK3: omp_offload.cont: // CHECK3-NEXT: ret i32 0 @@ -1081,43 +1081,43 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: 
[[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !15 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 -// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..4, i32 [[TMP8]], i32 [[TMP9]], ptr [[T_VAR1]]), !llvm.access.group !15 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..4, i32 [[TMP8]], i32 [[TMP9]], ptr [[T_VAR1]]), !llvm.access.group [[ACC_GRP16]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !15 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP16]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !15 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP16]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -1126,21 +1126,21 @@ // CHECK3: .omp.final.done: // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[T_VAR1]], ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.6, ptr @.gomp_critical_user_.reduction.var) -// CHECK3-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.6, ptr @.gomp_critical_user_.reduction.var) +// CHECK3-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK3-NEXT: ] // CHECK3: .omp.reduction.case1: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] // CHECK3-NEXT: store i32 [[ADD3]], ptr [[TMP0]], align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.case2: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[T_VAR1]], align 
4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP19:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.default: // CHECK3-NEXT: ret void @@ -1171,57 +1171,57 @@ // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: store i32 0, ptr [[T_VAR1]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 
-// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]] -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !18 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP19]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK3-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -1230,21 +1230,21 @@ // CHECK3: .omp.final.done: // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[T_VAR1]], ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.5, ptr @.gomp_critical_user_.reduction.var) -// CHECK3-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.5, ptr @.gomp_critical_user_.reduction.var) +// CHECK3-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK3-NEXT: ] // CHECK3: .omp.reduction.case1: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: 
[[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] // CHECK3-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.case2: -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK3-NEXT: [[TMP23:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP22]] monotonic, align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.default: // CHECK3-NEXT: ret void @@ -1258,15 +1258,15 @@ // CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK3-NEXT: ret void // // @@ -1278,15 +1278,15 @@ // CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = 
load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK3-NEXT: ret void // // @@ -1310,36 +1310,36 @@ // CHECK5-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: store i32 0, ptr [[SIVAR]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] -// CHECK5-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = 
load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK5-NEXT: store i32 [[ADD3]], ptr @_ZZ4mainE5sivar, align 4 // CHECK5-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v() @@ -1361,37 +1361,37 @@ // CHECK5-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i64 8, i1 false) // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: store i32 0, ptr [[T_VAR1]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]] -// CHECK5-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group !6 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] +// CHECK5-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, 
!llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP6]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK5-NEXT: store i32 [[ADD4]], ptr [[T_VAR]], align 4 // CHECK5-NEXT: ret i32 0 // @@ -1409,36 +1409,36 @@ // CHECK7-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: store i32 0, ptr [[SIVAR]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] -// CHECK7-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, 
!llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK7-NEXT: store i32 [[ADD3]], ptr @_ZZ4mainE5sivar, align 4 // CHECK7-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v() @@ -1460,37 +1460,37 @@ // CHECK7-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i32 8, i1 false) // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK7-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: store i32 0, ptr [[T_VAR1]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]] -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group !7 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: 
[[ADD2:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1 -// CHECK7-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP6]], 1 +// CHECK7-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK7-NEXT: store i32 [[ADD4]], ptr [[T_VAR]], align 4 // CHECK7-NEXT: ret i32 0 // @@ -1539,45 +1539,45 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF4:![0-9]+]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group 
[[ACC_GRP5:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..1, i64 [[TMP9]], i64 [[TMP11]], ptr [[SIVAR1]]), !llvm.access.group !4 +// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB4]], i32 3, ptr @.omp_outlined..1, i64 [[TMP9]], i64 [[TMP11]], ptr [[SIVAR1]]), !llvm.access.group [[ACC_GRP5]] // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -1586,21 +1586,21 @@ // CHECK9: .omp.final.done: // CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP19:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) -// CHECK9-NEXT: switch i32 [[TMP19]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK9-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) +// CHECK9-NEXT: switch i32 [[TMP17]], label 
[[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK9-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK9-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK9-NEXT: ] // CHECK9: .omp.reduction.case1: -// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] // CHECK9-NEXT: store i32 [[ADD3]], ptr [[TMP0]], align 4 // CHECK9-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK9: .omp.reduction.case2: -// CHECK9-NEXT: [[TMP22:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK9-NEXT: [[TMP23:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP22]] monotonic, align 4 +// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 // CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK9: .omp.reduction.default: // CHECK9-NEXT: ret void @@ -1632,9 +1632,9 @@ // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8 +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32 // CHECK9-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4 @@ -1642,52 +1642,52 @@ // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: store i32 0, ptr [[SIVAR2]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP4]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 1 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr 
[[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !8 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK9-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !8 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !8 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[SIVAR2]], align 4, !llvm.access.group !8 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[SIVAR2]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], [[TMP11]] -// CHECK9-NEXT: store i32 [[ADD4]], ptr [[SIVAR2]], align 4, !llvm.access.group !8 +// CHECK9-NEXT: store i32 [[ADD4]], ptr [[SIVAR2]], align 4, !llvm.access.group [[ACC_GRP9]] // CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[SIVAR2]], ptr [[TMP13]], align 8, !llvm.access.group !8 -// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]]), !llvm.access.group !8 +// CHECK9-NEXT: store ptr [[SIVAR2]], ptr [[TMP13]], align 8, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]]), !llvm.access.group [[ACC_GRP9]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], 1 -// CHECK9-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !8 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void 
@__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP4]]) -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK9-NEXT: br i1 [[TMP16]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -1696,21 +1696,21 @@ // CHECK9: .omp.final.done: // CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK9-NEXT: store ptr [[SIVAR2]], ptr [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK9-NEXT: switch i32 [[TMP20]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK9-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK9-NEXT: switch i32 [[TMP18]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK9-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK9-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK9-NEXT: ] // CHECK9: .omp.reduction.case1: -// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP22:%.*]] = load i32, ptr [[SIVAR2]], align 4 -// CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP21]], [[TMP22]] +// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR2]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] // CHECK9-NEXT: store i32 [[ADD6]], ptr [[TMP0]], align 4 // CHECK9-NEXT: call void @__kmpc_end_reduce_nowait(ptr @[[GLOB3]], i32 [[TMP4]], ptr @.gomp_critical_user_.reduction.var) // CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK9: .omp.reduction.case2: -// CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[SIVAR2]], align 4 -// CHECK9-NEXT: [[TMP24:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP23]] monotonic, align 4 +// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[SIVAR2]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP22:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP21]] monotonic, align 4 // CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK9: .omp.reduction.default: // CHECK9-NEXT: ret void @@ -1724,15 +1724,15 @@ // CHECK9-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK9-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 
-// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK9-NEXT: ret void // // @@ -1744,15 +1744,15 @@ // CHECK9-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK9-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK9-NEXT: ret void // // diff --git a/clang/test/OpenMP/teams_distribute_simd_codegen.cpp b/clang/test/OpenMP/teams_distribute_simd_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_simd_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_simd_codegen.cpp @@ -236,22 +236,22 @@ // CHECK1-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP0]], 128 // CHECK1-NEXT: store i32 [[DIV]], ptr [[TE]], align 4 // CHECK1-NEXT: store i32 128, ptr [[TH]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TE]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TE]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP1]], ptr [[TE_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[TE_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[TE_CASTED]], align 8, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP3]], ptr [[TH_CASTED]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TH_CASTED]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[TH_CASTED]], align 8, 
!noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[I_CASTED]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[I_CASTED]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[I_CASTED]], align 8, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP7]], ptr [[N_CASTED]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[N_CASTED]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP2]], ptr [[TMP9]], align 8 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -284,15 +284,15 @@ // CHECK1-NEXT: store ptr null, ptr [[TMP23]], align 8 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[TE]], align 4 -// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[TE]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP27]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 // CHECK1-NEXT: [[DIV2:%.*]] = sdiv i32 [[SUB]], 1 // CHECK1-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV2]], 1 // CHECK1-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 // CHECK1-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -321,9 +321,9 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z21teams_argument_globali_l30(i64 [[TMP2]], i64 [[TMP4]], ptr @a, i64 [[TMP6]], i64 [[TMP8]]) #[[ATTR3:[0-9]+]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK1: omp_offload.cont: -// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP42]], ptr [[N_CASTED4]], align 4 -// CHECK1-NEXT: [[TMP43:%.*]] = load i64, ptr [[N_CASTED4]], align 8 +// CHECK1-NEXT: [[TMP43:%.*]] = load i64, ptr [[N_CASTED4]], align 8, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP43]], ptr [[TMP44]], align 8 // CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 @@ -338,14 +338,14 @@ // CHECK1-NEXT: store ptr null, ptr [[TMP49]], align 8 // CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 // 
CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK1-NEXT: [[TMP52:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP52]], ptr [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK1-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK1-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP53]], 0 // CHECK1-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 // CHECK1-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 // CHECK1-NEXT: store i32 [[SUB13]], ptr [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK1-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_10]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP54]], 1 // CHECK1-NEXT: [[TMP55:%.*]] = zext i32 [[ADD14]] to i64 // CHECK1-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 @@ -374,7 +374,7 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z21teams_argument_globali_l36(i64 [[TMP43]], ptr @a) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT17]] // CHECK1: omp_offload.cont17: -// CHECK1-NEXT: [[TMP67:%.*]] = load i32, ptr @a, align 4 +// CHECK1-NEXT: [[TMP67:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: ret i32 [[TMP67]] // // @@ -393,8 +393,8 @@ // CHECK1-NEXT: store i64 [[I]], ptr [[I_ADDR]], align 8 // CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TE_ADDR]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_ADDR]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TE_ADDR]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP2]], i32 [[TMP3]]) // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined., ptr [[I_ADDR]], ptr [[N_ADDR]], ptr [[TMP1]]) // CHECK1-NEXT: ret void @@ -426,77 +426,77 @@ // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[N_ADDR]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK1-NEXT: store i32 0, ptr [[I3]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1: omp.precond.then: // CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [100 x i32], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[ARRAYDECAY]], i64 16) ] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// 
CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]] -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP5]] -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i32], ptr [[TMP2]], i64 0, i64 [[IDXPROM]] -// CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK1-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP20]]) -// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 // CHECK1-NEXT: br i1 [[TMP22]], label 
[[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: -// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP23]], 0 // CHECK1-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1 // CHECK1-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1 @@ -544,53 +544,53 @@ // CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0 // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK1-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]] // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK1: omp.precond.then: // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP7]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]] // CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], 
[[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK1-NEXT: store i32 [[ADD]], ptr [[I3]], align 4 -// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[I3]], align 4 +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[I3]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP16]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i32], ptr [[TMP1]], i64 0, i64 [[IDXPROM]] // CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4 @@ -598,21 +598,21 @@ // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP17]], 1 // CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP19]]) -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK1-NEXT: br i1 [[TMP21]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: -// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP22]], 0 // CHECK1-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1 // CHECK1-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1 @@ -657,22 +657,22 @@ // CHECK3-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr 
[[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF6:![0-9]+]] // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP0]], 128 // CHECK3-NEXT: store i32 [[DIV]], ptr [[TE]], align 4 // CHECK3-NEXT: store i32 128, ptr [[TH]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TE]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[TE]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP1]], ptr [[TE_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TE_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TE_CASTED]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP3]], ptr [[TH_CASTED]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TH_CASTED]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TH_CASTED]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[I_CASTED]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[I_CASTED]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[I_CASTED]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP7]], ptr [[N_CASTED]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[N_CASTED]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP2]], ptr [[TMP9]], align 4 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -705,15 +705,15 @@ // CHECK3-NEXT: store ptr null, ptr [[TMP23]], align 4 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[TE]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[TE]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP27]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 // CHECK3-NEXT: [[DIV2:%.*]] = sdiv i32 [[SUB]], 1 // CHECK3-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV2]], 1 // CHECK3-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 // CHECK3-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -742,9 +742,9 @@ // CHECK3-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z21teams_argument_globali_l30(i32 [[TMP2]], i32 [[TMP4]], ptr @a, i32 [[TMP6]], i32 [[TMP8]]) #[[ATTR3:[0-9]+]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK3: omp_offload.cont: -// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP42:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP42]], ptr [[N_CASTED4]], align 4 -// CHECK3-NEXT: [[TMP43:%.*]] = load i32, ptr [[N_CASTED4]], align 4 +// CHECK3-NEXT: [[TMP43:%.*]] = load i32, ptr [[N_CASTED4]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP43]], ptr [[TMP44]], align 4 // CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 @@ -759,14 +759,14 @@ // CHECK3-NEXT: store ptr null, ptr [[TMP49]], align 4 // CHECK3-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 // CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP52:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK3-NEXT: [[TMP52:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP52]], ptr [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK3-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK3-NEXT: [[TMP53:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_9]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP53]], 0 // CHECK3-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 // CHECK3-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 // CHECK3-NEXT: store i32 [[SUB13]], ptr [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK3-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK3-NEXT: [[TMP54:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_10]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP54]], 1 // CHECK3-NEXT: [[TMP55:%.*]] = zext i32 [[ADD14]] to i64 // CHECK3-NEXT: [[KERNEL_ARGS15:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 @@ -795,7 +795,7 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z21teams_argument_globali_l36(i32 [[TMP43]], ptr @a) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT17]] // CHECK3: omp_offload.cont17: -// CHECK3-NEXT: [[TMP67:%.*]] = load i32, ptr @a, align 4 +// CHECK3-NEXT: [[TMP67:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: ret i32 [[TMP67]] // // @@ -814,8 +814,8 @@ // CHECK3-NEXT: store i32 [[I]], ptr [[I_ADDR]], align 4 // CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TE_ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_ADDR]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TE_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP2]], i32 [[TMP3]]) // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined., ptr [[I_ADDR]], ptr [[N_ADDR]], ptr [[TMP1]]) // CHECK3-NEXT: ret void @@ -847,76 +847,76 @@ // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[I_ADDR]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[N_ADDR]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK3-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK3-NEXT: store i32 0, ptr [[I3]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK3: omp.precond.then: // CHECK3-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [100 x i32], ptr [[TMP2]], i32 0, i32 0 // CHECK3-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[ARRAYDECAY]], i32 16) ] // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK3-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// 
CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]] -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK3-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i32], ptr [[TMP2]], i32 0, i32 [[TMP17]] -// CHECK3-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK3-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP20]]) -// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 // CHECK3-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: 
.omp.final.then: -// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[SUB8:%.*]] = sub nsw i32 [[TMP23]], 0 // CHECK3-NEXT: [[DIV9:%.*]] = sdiv i32 [[SUB8]], 1 // CHECK3-NEXT: [[MUL10:%.*]] = mul nsw i32 [[DIV9]], 1 @@ -964,74 +964,74 @@ // CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0 // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK3-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK3-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK3: omp.precond.then: // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP7]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]] // CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], 
align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // CHECK3-NEXT: store i32 [[ADD]], ptr [[I3]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I3]], align 4 +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I3]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i32], ptr [[TMP1]], i32 0, i32 [[TMP16]] // CHECK3-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP17]], 1 // CHECK3-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP18]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP19]]) -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK3-NEXT: br i1 [[TMP21]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: -// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP22]], 0 // CHECK3-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1 // CHECK3-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1 @@ -1077,55 +1077,55 @@ // CHECK5-NEXT: [[DOTOMP_IV24:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[I25:%.*]] = alloca i32, align 4 // CHECK5-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // 
CHECK5-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP0]], 128 // CHECK5-NEXT: store i32 [[DIV]], ptr [[TE]], align 4 // CHECK5-NEXT: store i32 128, ptr [[TH]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0 // CHECK5-NEXT: [[DIV2:%.*]] = sdiv i32 [[SUB]], 1 // CHECK5-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV2]], 1 // CHECK5-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_UB]], align 4 // CHECK5-NEXT: store i32 0, ptr [[I4]], align 4 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]] // CHECK5-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK5: simd.if.then: -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr @a, i64 16) ] -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP6]], ptr [[DOTLINEAR_START]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK5-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: [[TMP10:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP10]] to i64 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = 
getelementptr inbounds [100 x i32], ptr @a, i64 0, i64 [[IDXPROM]] -// CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK5-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK5-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK5: omp.inner.for.end: -// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK5-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP12]], 0 // CHECK5-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1 // CHECK5-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1 @@ -1133,35 +1133,35 @@ // CHECK5-NEXT: store i32 [[ADD12]], ptr [[I]], align 4 // CHECK5-NEXT: br label [[SIMD_IF_END]] // CHECK5: simd.if.end: -// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK5-NEXT: [[TMP13:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP13]], ptr [[DOTCAPTURE_EXPR_14]], align 4 -// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_14]], align 4 +// CHECK5-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_14]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB16:%.*]] = sub nsw i32 [[TMP14]], 0 // CHECK5-NEXT: [[DIV17:%.*]] = sdiv i32 [[SUB16]], 1 // CHECK5-NEXT: [[SUB18:%.*]] = sub nsw i32 [[DIV17]], 1 // CHECK5-NEXT: store i32 [[SUB18]], ptr [[DOTCAPTURE_EXPR_15]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB19]], align 4 -// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_15]], align 4 +// CHECK5-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_15]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_UB20]], align 4 // CHECK5-NEXT: store i32 0, ptr [[I21]], align 4 -// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_14]], align 4 +// CHECK5-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_14]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP22:%.*]] = icmp slt i32 0, [[TMP16]] // CHECK5-NEXT: br i1 [[CMP22]], label [[SIMD_IF_THEN23:%.*]], label [[SIMD_IF_END41:%.*]] // CHECK5: simd.if.then23: -// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB19]], align 4 +// CHECK5-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB19]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: store i32 [[TMP17]], ptr [[DOTOMP_IV24]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND26:%.*]] // CHECK5: omp.inner.for.cond26: -// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV24]], align 4 -// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB20]], align 4 +// CHECK5-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV24]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB20]], align 
4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP27:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]] // CHECK5-NEXT: br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END36:%.*]] // CHECK5: omp.inner.for.body28: -// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV24]], align 4 +// CHECK5-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV24]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL29:%.*]] = mul nsw i32 [[TMP20]], 1 // CHECK5-NEXT: [[ADD30:%.*]] = add nsw i32 0, [[MUL29]] // CHECK5-NEXT: store i32 [[ADD30]], ptr [[I25]], align 4 -// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[I25]], align 4 +// CHECK5-NEXT: [[TMP21:%.*]] = load i32, ptr [[I25]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM31:%.*]] = sext i32 [[TMP21]] to i64 // CHECK5-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds [100 x i32], ptr @a, i64 0, i64 [[IDXPROM31]] // CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX32]], align 4 @@ -1169,12 +1169,12 @@ // CHECK5: omp.body.continue33: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC34:%.*]] // CHECK5: omp.inner.for.inc34: -// CHECK5-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV24]], align 4 +// CHECK5-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV24]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD35:%.*]] = add nsw i32 [[TMP22]], 1 // CHECK5-NEXT: store i32 [[ADD35]], ptr [[DOTOMP_IV24]], align 4 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK5: omp.inner.for.end36: -// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_14]], align 4 +// CHECK5-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_14]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[SUB37:%.*]] = sub nsw i32 [[TMP23]], 0 // CHECK5-NEXT: [[DIV38:%.*]] = sdiv i32 [[SUB37]], 1 // CHECK5-NEXT: [[MUL39:%.*]] = mul nsw i32 [[DIV38]], 1 @@ -1182,7 +1182,7 @@ // CHECK5-NEXT: store i32 [[ADD40]], ptr [[I25]], align 4 // CHECK5-NEXT: br label [[SIMD_IF_END41]] // CHECK5: simd.if.end41: -// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr @a, align 4 +// CHECK5-NEXT: [[TMP24:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: ret i32 [[TMP24]] // // @@ -1212,54 +1212,54 @@ // CHECK7-NEXT: [[DOTOMP_IV24:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[I25:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK7-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP0]], 128 // CHECK7-NEXT: store i32 [[DIV]], ptr [[TE]], align 4 // CHECK7-NEXT: store i32 128, ptr [[TH]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0 // CHECK7-NEXT: [[DIV2:%.*]] = sdiv i32 [[SUB]], 1 // CHECK7-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV2]], 1 // CHECK7-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK7-NEXT: [[TMP3:%.*]] = 
load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: store i32 [[TMP3]], ptr [[DOTOMP_UB]], align 4 // CHECK7-NEXT: store i32 0, ptr [[I4]], align 4 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]] // CHECK7-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK7: simd.if.then: -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr @a, i32 16) ] -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: store i32 [[TMP6]], ptr [[DOTLINEAR_START]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]] -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK7-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: [[TMP10:%.*]] = load i32, ptr [[I5]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i32], ptr @a, i32 0, i32 [[TMP10]] -// CHECK7-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK7-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK7-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], 
!llvm.loop [[LOOP5:![0-9]+]] // CHECK7: omp.inner.for.end: -// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK7-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[SUB9:%.*]] = sub nsw i32 [[TMP12]], 0 // CHECK7-NEXT: [[DIV10:%.*]] = sdiv i32 [[SUB9]], 1 // CHECK7-NEXT: [[MUL11:%.*]] = mul nsw i32 [[DIV10]], 1 @@ -1267,47 +1267,47 @@ // CHECK7-NEXT: store i32 [[ADD12]], ptr [[I]], align 4 // CHECK7-NEXT: br label [[SIMD_IF_END]] // CHECK7: simd.if.end: -// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[N_ADDR]], align 4 +// CHECK7-NEXT: [[TMP13:%.*]] = load i32, ptr [[N_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: store i32 [[TMP13]], ptr [[DOTCAPTURE_EXPR_14]], align 4 -// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_14]], align 4 +// CHECK7-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_14]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[SUB16:%.*]] = sub nsw i32 [[TMP14]], 0 // CHECK7-NEXT: [[DIV17:%.*]] = sdiv i32 [[SUB16]], 1 // CHECK7-NEXT: [[SUB18:%.*]] = sub nsw i32 [[DIV17]], 1 // CHECK7-NEXT: store i32 [[SUB18]], ptr [[DOTCAPTURE_EXPR_15]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB19]], align 4 -// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_15]], align 4 +// CHECK7-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_15]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_UB20]], align 4 // CHECK7-NEXT: store i32 0, ptr [[I21]], align 4 -// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_14]], align 4 +// CHECK7-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_14]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[CMP22:%.*]] = icmp slt i32 0, [[TMP16]] // CHECK7-NEXT: br i1 [[CMP22]], label [[SIMD_IF_THEN23:%.*]], label [[SIMD_IF_END40:%.*]] // CHECK7: simd.if.then23: -// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB19]], align 4 +// CHECK7-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_LB19]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: store i32 [[TMP17]], ptr [[DOTOMP_IV24]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND26:%.*]] // CHECK7: omp.inner.for.cond26: -// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV24]], align 4 -// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB20]], align 4 +// CHECK7-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV24]], align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_UB20]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[CMP27:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]] // CHECK7-NEXT: br i1 [[CMP27]], label [[OMP_INNER_FOR_BODY28:%.*]], label [[OMP_INNER_FOR_END35:%.*]] // CHECK7: omp.inner.for.body28: -// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV24]], align 4 +// CHECK7-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV24]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[MUL29:%.*]] = mul nsw i32 [[TMP20]], 1 // CHECK7-NEXT: [[ADD30:%.*]] = add nsw i32 0, [[MUL29]] // CHECK7-NEXT: store i32 [[ADD30]], ptr [[I25]], align 4 -// CHECK7-NEXT: [[TMP21:%.*]] = load i32, ptr [[I25]], align 4 +// CHECK7-NEXT: [[TMP21:%.*]] = load i32, ptr [[I25]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds [100 x i32], ptr @a, i32 0, i32 [[TMP21]] // CHECK7-NEXT: store i32 0, ptr [[ARRAYIDX31]], align 4 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE32:%.*]] // CHECK7: omp.body.continue32: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC33:%.*]] 
// CHECK7: omp.inner.for.inc33: -// CHECK7-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV24]], align 4 +// CHECK7-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV24]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD34:%.*]] = add nsw i32 [[TMP22]], 1 // CHECK7-NEXT: store i32 [[ADD34]], ptr [[DOTOMP_IV24]], align 4 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND26]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK7: omp.inner.for.end35: -// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_14]], align 4 +// CHECK7-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_14]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[SUB36:%.*]] = sub nsw i32 [[TMP23]], 0 // CHECK7-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1 // CHECK7-NEXT: [[MUL38:%.*]] = mul nsw i32 [[DIV37]], 1 @@ -1315,7 +1315,7 @@ // CHECK7-NEXT: store i32 [[ADD39]], ptr [[I25]], align 4 // CHECK7-NEXT: br label [[SIMD_IF_END40]] // CHECK7: simd.if.end40: -// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr @a, align 4 +// CHECK7-NEXT: [[TMP24:%.*]] = load i32, ptr @a, align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: ret i32 [[TMP24]] // // @@ -1334,15 +1334,15 @@ // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF4:![0-9]+]] // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK9-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK9-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4 // CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes, i64 24, i1 false) // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 @@ -1368,14 +1368,14 @@ // CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4 +// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP20]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], 
align 4 +// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], 1 // CHECK9-NEXT: [[TMP22:%.*]] = zext i32 [[ADD]] to i64 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -1405,7 +1405,7 @@ // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 0 -// CHECK9-NEXT: [[TMP34:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP34:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[TMP35:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP35]]) // CHECK9-NEXT: ret i32 [[TMP34]] @@ -1420,7 +1420,7 @@ // CHECK9-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined., ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]]) // CHECK9-NEXT: ret void @@ -1450,77 +1450,77 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK9-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK9: omp.precond.then: // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK9-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]] -// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP4]] -// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]] -// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK9-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4 +// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP20]]) -// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 // CHECK9-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: -// CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP23]], 0 // CHECK9-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1 // CHECK9-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1 @@ -1555,14 +1555,14 @@ // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK11-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() // CHECK11-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: store i32 [[TMP2]], ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_CASTED]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4 // CHECK11-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64 // CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes, i32 24, i1 false) @@ -1589,14 +1589,14 @@ // CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4 +// CHECK11-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP20]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], 1 // CHECK11-NEXT: [[TMP22:%.*]] = zext i32 [[ADD]] to i64 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -1626,7 +1626,7 @@ // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 0 -// CHECK11-NEXT: [[TMP34:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP34:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: [[TMP35:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP35]]) // CHECK11-NEXT: ret i32 [[TMP34]] @@ -1641,7 +1641,7 @@ // CHECK11-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4 // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined., ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]]) // CHECK11-NEXT: ret void @@ -1671,76 +1671,76 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK11-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK11: omp.precond.then: // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], 
align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]] -// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK11-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK11-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP5]] -// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK11-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK11-NEXT: 
[[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP17]] -// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK11-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK11-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP20]]) -// CHECK11-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 // CHECK11-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: -// CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP23]], 0 // CHECK11-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1 // CHECK11-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1 @@ -1775,54 +1775,54 @@ // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[I3:%.*]] = alloca i32, align 4 // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK13-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK13-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK13-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK13-NEXT: 
[[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]] // CHECK13-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK13: simd.if.then: -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK13-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[IDXPROM]] -// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK13-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK13: omp.inner.for.end: -// CHECK13-NEXT: [[TMP13:%.*]] = load 
i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP13]], 0 // CHECK13-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK13-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1 @@ -1831,7 +1831,7 @@ // CHECK13-NEXT: br label [[SIMD_IF_END]] // CHECK13: simd.if.end: // CHECK13-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 0 -// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4 +// CHECK13-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 // CHECK13-NEXT: call void @llvm.stackrestore(ptr [[TMP15]]) // CHECK13-NEXT: ret i32 [[TMP14]] @@ -1852,52 +1852,52 @@ // CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[I3:%.*]] = alloca i32, align 4 // CHECK15-NEXT: store i32 100, ptr [[N]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK15-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() // CHECK15-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4 +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0 // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK15-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4 // CHECK15-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] // CHECK15-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK15: simd.if.then: -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]] -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP4:%.*]] = icmp 
sle i32 [[TMP7]], [[TMP8]] // CHECK15-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP10]] -// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK15-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK15-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK15-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK15: omp.inner.for.end: -// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP12]], 0 // CHECK15-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK15-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1 @@ -1906,7 +1906,7 @@ // CHECK15-NEXT: br label [[SIMD_IF_END]] // CHECK15: simd.if.end: // CHECK15-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 0 -// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4 +// CHECK15-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP14:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: call void @llvm.stackrestore(ptr [[TMP14]]) // CHECK15-NEXT: ret i32 [[TMP13]] @@ -1990,7 +1990,7 @@ // CHECK17: omp_offload.cont: // CHECK17-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A2]], i64 0, i64 0 -// CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK17-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF4:![0-9]+]] // CHECK17-NEXT: ret i32 [[TMP29]] // // @@ -1999,7 +1999,7 @@ // CHECK17-NEXT: entry: // CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 // CHECK17-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 
+// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK17-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., ptr [[TMP0]]) // CHECK17-NEXT: ret void // @@ -2020,59 +2020,59 @@ // CHECK17-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK17-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK17-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK17-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK17-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4 // CHECK17-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK17-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF4]] // CHECK17-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: -// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK17-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK17-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK17-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: -// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]] -// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK17-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK17-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: -// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK17-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK17-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK17-NEXT: store i32 [[ADD]], ptr 
[[I]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK17-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP9:%.*]] = load float, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK17-NEXT: [[TMP9:%.*]] = load float, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK17-NEXT: [[CONV:%.*]] = fptosi float [[TMP9]] to i32 // CHECK17-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[TMP0]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK17-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK17-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP10]] to i64 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK17-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK17-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: -// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK17-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK17-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] -// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK17-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK17-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK17-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK17-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: @@ -2167,7 +2167,7 @@ // CHECK19: omp_offload.cont: // CHECK19-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A2]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK19-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK19-NEXT: ret i32 [[TMP29]] // // @@ -2176,7 +2176,7 @@ // CHECK19-NEXT: entry: // CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 // CHECK19-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK19-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., ptr [[TMP0]]) // CHECK19-NEXT: ret void // @@ -2197,58 +2197,58 @@ // CHECK19-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK19-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 -// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK19-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK19-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4 // CHECK19-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK19-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK19-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: -// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK19-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK19-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK19-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: -// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]] -// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK19-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK19-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: -// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK19-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK19-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK19-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, 
i32 1 -// CHECK19-NEXT: [[TMP9:%.*]] = load float, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK19-NEXT: [[TMP9:%.*]] = load float, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK19-NEXT: [[CONV:%.*]] = fptosi float [[TMP9]] to i32 // CHECK19-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[TMP0]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK19-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP10]] -// CHECK19-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK19-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: -// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK19-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK19-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] -// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK19-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK19-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK19-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK19-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: @@ -2343,7 +2343,7 @@ // CHECK21: omp_offload.cont: // CHECK21-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A2]], i64 0, i64 0 -// CHECK21-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK21-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF4:![0-9]+]] // CHECK21-NEXT: ret i32 [[TMP29]] // // @@ -2354,17 +2354,17 @@ // CHECK21-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 // CHECK21-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 1 -// CHECK21-NEXT: [[TMP1:%.*]] = load float, ptr [[B]], align 4 +// CHECK21-NEXT: [[TMP1:%.*]] = load float, ptr [[B]], align 4, !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP1]], 0.000000e+00 // CHECK21-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK21-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// 
CHECK21-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK21-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK21-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL1]] to i8 // CHECK21-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK21-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK21-NEXT: [[TMP3:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF4]] // CHECK21-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined., ptr [[TMP0]], i64 [[TMP3]]) // CHECK21-NEXT: ret void // @@ -2387,77 +2387,77 @@ // CHECK21-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK21-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 -// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK21-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK21-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4 // CHECK21-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK21-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF4]] // CHECK21-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: -// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK21-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK21-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK21-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1 // CHECK21-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK21: omp_if.then: // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: -// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]] -// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK21-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, 
!llvm.access.group [[ACC_GRP5:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK21-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK21-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: -// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK21-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK21-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK21-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 1 -// CHECK21-NEXT: [[TMP10:%.*]] = load float, ptr [[B]], align 4, !nontemporal !5, !llvm.access.group [[ACC_GRP4]] +// CHECK21-NEXT: [[TMP10:%.*]] = load float, ptr [[B]], align 4, !nontemporal !6, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[CONV:%.*]] = fptosi float [[TMP10]] to i32 // CHECK21-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[TMP0]], i32 0, i32 0 -// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK21-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK21-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK21-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: -// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK21-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK21-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK21-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_IF_END:%.*]] // CHECK21: omp_if.else: // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND3:%.*]] // CHECK21: omp.inner.for.cond3: -// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK21-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF4]] +// CHECK21-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK21-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY5:%.*]], label [[OMP_INNER_FOR_END16:%.*]] // CHECK21: omp.inner.for.body5: -// CHECK21-NEXT: [[TMP15:%.*]] = load 
i32, ptr [[DOTOMP_IV]], align 4 +// CHECK21-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[MUL6:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK21-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]] // CHECK21-NEXT: store i32 [[ADD7]], ptr [[I]], align 4 // CHECK21-NEXT: [[B8:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[TMP0]], i32 0, i32 1 -// CHECK21-NEXT: [[TMP16:%.*]] = load float, ptr [[B8]], align 4 +// CHECK21-NEXT: [[TMP16:%.*]] = load float, ptr [[B8]], align 4, !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[CONV9:%.*]] = fptosi float [[TMP16]] to i32 // CHECK21-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[TMP0]], i32 0, i32 0 -// CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4 +// CHECK21-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP17]] to i64 // CHECK21-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [123 x i32], ptr [[A10]], i64 0, i64 [[IDXPROM11]] // CHECK21-NEXT: store i32 [[CONV9]], ptr [[ARRAYIDX12]], align 4 @@ -2465,17 +2465,17 @@ // CHECK21: omp.body.continue13: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]] // CHECK21: omp.inner.for.inc14: -// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK21-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1 // CHECK21-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_IV]], align 4 -// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND3]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK21-NEXT: br label [[OMP_INNER_FOR_COND3]], !llvm.loop [[LOOP10:![0-9]+]] // CHECK21: omp.inner.for.end16: // CHECK21-NEXT: br label [[OMP_IF_END]] // CHECK21: omp_if.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK21-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK21-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0 // CHECK21-NEXT: br i1 [[TMP20]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: @@ -2570,7 +2570,7 @@ // CHECK23: omp_offload.cont: // CHECK23-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A2]], i32 0, i32 0 -// CHECK23-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +// CHECK23-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK23-NEXT: ret i32 [[TMP29]] // // @@ -2581,17 +2581,17 @@ // CHECK23-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 // CHECK23-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 -// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 1 -// CHECK23-NEXT: [[TMP1:%.*]] = load float, ptr [[B]], align 4 +// CHECK23-NEXT: [[TMP1:%.*]] = load float, ptr [[B]], align 4, !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP1]], 0.000000e+00 // CHECK23-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK23-NEXT: store i8 
[[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK23-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK23-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK23-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL1]] to i8 // CHECK23-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF5]] // CHECK23-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 2, ptr @.omp_outlined., ptr [[TMP0]], i32 [[TMP3]]) // CHECK23-NEXT: ret void // @@ -2614,93 +2614,93 @@ // CHECK23-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK23-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 -// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK23-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK23-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4 // CHECK23-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK23-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK23-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: -// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK23-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK23-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK23-NEXT: [[TMP6:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1 // CHECK23-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK23: omp_if.then: // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: -// CHECK23-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]] -// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK23-NEXT: 
[[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK23-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: -// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK23-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK23-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK23-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 1 -// CHECK23-NEXT: [[TMP10:%.*]] = load float, ptr [[B]], align 4, !nontemporal !6, !llvm.access.group [[ACC_GRP5]] +// CHECK23-NEXT: [[TMP10:%.*]] = load float, ptr [[B]], align 4, !nontemporal !7, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[CONV:%.*]] = fptosi float [[TMP10]] to i32 // CHECK23-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[TMP0]], i32 0, i32 0 -// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK23-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP11]] -// CHECK23-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK23-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: -// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK23-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK23-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK23-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_IF_END:%.*]] // CHECK23: omp_if.else: // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND3:%.*]] // CHECK23: omp.inner.for.cond3: -// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK23-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF5]] +// CHECK23-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] // CHECK23-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY5:%.*]], label [[OMP_INNER_FOR_END15:%.*]] // CHECK23: omp.inner.for.body5: -// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4 +// CHECK23-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[MUL6:%.*]] = mul nsw i32 [[TMP15]], 1 // CHECK23-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]] // CHECK23-NEXT: store i32 [[ADD7]], ptr [[I]], align 4 // CHECK23-NEXT: [[B8:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[TMP0]], i32 0, i32 1 -// CHECK23-NEXT: [[TMP16:%.*]] = load float, ptr [[B8]], align 4 +// CHECK23-NEXT: [[TMP16:%.*]] = load float, ptr [[B8]], align 4, !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[CONV9:%.*]] = fptosi float [[TMP16]] to i32 // CHECK23-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[TMP0]], i32 0, i32 0 -// CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4 +// CHECK23-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [123 x i32], ptr [[A10]], i32 0, i32 [[TMP17]] // CHECK23-NEXT: store i32 [[CONV9]], ptr [[ARRAYIDX11]], align 4 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] // CHECK23: omp.body.continue12: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] // CHECK23: omp.inner.for.inc13: -// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK23-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP18]], 1 // CHECK23-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_IV]], align 4 -// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND3]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK23-NEXT: br label [[OMP_INNER_FOR_COND3]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK23: omp.inner.for.end15: // CHECK23-NEXT: br label [[OMP_IF_END]] // CHECK23: omp_if.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK23-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK23-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0 // CHECK23-NEXT: br i1 [[TMP20]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: @@ -2738,40 +2738,40 @@ // CHECK25-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK25-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK25-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4 -// CHECK25-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK25-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK25-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK25: omp.inner.for.cond: -// CHECK25-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK25-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK25-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK25-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK25-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK25-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK25: omp.inner.for.body: -// CHECK25-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group 
[[ACC_GRP2]] +// CHECK25-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK25-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK25-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK25-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK25-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 1 -// CHECK25-NEXT: [[TMP4:%.*]] = load float, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK25-NEXT: [[TMP4:%.*]] = load float, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK25-NEXT: [[CONV:%.*]] = fptosi float [[TMP4]] to i32 // CHECK25-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 -// CHECK25-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK25-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK25-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP5]] to i64 // CHECK25-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK25-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK25-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK25-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK25: omp.body.continue: // CHECK25-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK25: omp.inner.for.inc: -// CHECK25-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK25-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK25-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK25-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK25-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK25-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK25-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK25: omp.inner.for.end: // CHECK25-NEXT: store i32 123, ptr [[I]], align 4 // CHECK25-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK25-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [123 x i32], ptr [[A3]], i64 0, i64 0 -// CHECK25-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4 +// CHECK25-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF2]] // CHECK25-NEXT: ret i32 [[TMP7]] // // @@ -2796,39 +2796,39 @@ // CHECK27-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 // CHECK27-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK27-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4 -// CHECK27-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK27-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK27-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK27-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK27: omp.inner.for.cond: -// CHECK27-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]] -// CHECK27-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK27-NEXT: [[TMP1:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK27-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK27-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK27-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK27: omp.inner.for.body: -// CHECK27-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK27-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK27-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK27-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK27-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK27-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 1 -// CHECK27-NEXT: [[TMP4:%.*]] = load float, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK27-NEXT: [[TMP4:%.*]] = load float, ptr [[B]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK27-NEXT: [[CONV:%.*]] = fptosi float [[TMP4]] to i32 // CHECK27-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 -// CHECK27-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK27-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK27-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP5]] -// CHECK27-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK27-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK27-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK27: omp.body.continue: // CHECK27-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK27: omp.inner.for.inc: -// CHECK27-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK27-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK27-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK27-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK27-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK27: omp.inner.for.end: // CHECK27-NEXT: store i32 123, ptr [[I]], align 4 // CHECK27-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK27-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [123 x i32], ptr [[A3]], i32 0, i32 0 -// CHECK27-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4 +// CHECK27-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF3]] // CHECK27-NEXT: ret i32 [[TMP7]] // // @@ -2853,64 +2853,64 @@ // CHECK29-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 // CHECK29-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK29-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 1 -// CHECK29-NEXT: [[TMP0:%.*]] = load float, ptr [[B]], align 4 +// CHECK29-NEXT: [[TMP0:%.*]] = load 
float, ptr [[B]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK29-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP0]], 0.000000e+00 // CHECK29-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK29-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK29-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK29-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4 -// CHECK29-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK29-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK29-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 -// CHECK29-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK29-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK29-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK29-NEXT: br i1 [[TOBOOL2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK29: omp_if.then: // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK29: omp.inner.for.cond: -// CHECK29-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK29-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK29-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK29-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK29-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]] // CHECK29-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK29: omp.inner.for.body: -// CHECK29-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK29-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK29-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP5]], 1 // CHECK29-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK29-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK29-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK29-NEXT: [[B3:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 1 -// CHECK29-NEXT: [[TMP6:%.*]] = load float, ptr [[B3]], align 4, !nontemporal !3, !llvm.access.group [[ACC_GRP2]] +// CHECK29-NEXT: [[TMP6:%.*]] = load float, ptr [[B3]], align 4, !nontemporal !4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK29-NEXT: [[CONV:%.*]] = fptosi float [[TMP6]] to i32 // CHECK29-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 -// CHECK29-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK29-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK29-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP7]] to i64 // CHECK29-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK29-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK29-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK29-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK29: omp.body.continue: // CHECK29-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK29: omp.inner.for.inc: -// CHECK29-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, 
!llvm.access.group [[ACC_GRP2]] +// CHECK29-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK29-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK29-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK29-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK29-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK29-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK29: omp.inner.for.end: // CHECK29-NEXT: br label [[OMP_IF_END:%.*]] // CHECK29: omp_if.else: // CHECK29-NEXT: br label [[OMP_INNER_FOR_COND5:%.*]] // CHECK29: omp.inner.for.cond5: -// CHECK29-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK29-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK29-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] +// CHECK29-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF2]] // CHECK29-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK29-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY7:%.*]], label [[OMP_INNER_FOR_END18:%.*]] // CHECK29: omp.inner.for.body7: -// CHECK29-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK29-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK29-NEXT: [[MUL8:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK29-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]] // CHECK29-NEXT: store i32 [[ADD9]], ptr [[I]], align 4 // CHECK29-NEXT: [[B10:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 1 -// CHECK29-NEXT: [[TMP12:%.*]] = load float, ptr [[B10]], align 4 +// CHECK29-NEXT: [[TMP12:%.*]] = load float, ptr [[B10]], align 4, !noundef [[NOUNDEF2]] // CHECK29-NEXT: [[CONV11:%.*]] = fptosi float [[TMP12]] to i32 // CHECK29-NEXT: [[A12:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 -// CHECK29-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK29-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF2]] // CHECK29-NEXT: [[IDXPROM13:%.*]] = sext i32 [[TMP13]] to i64 // CHECK29-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [123 x i32], ptr [[A12]], i64 0, i64 [[IDXPROM13]] // CHECK29-NEXT: store i32 [[CONV11]], ptr [[ARRAYIDX14]], align 4 @@ -2918,17 +2918,17 @@ // CHECK29: omp.body.continue15: // CHECK29-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]] // CHECK29: omp.inner.for.inc16: -// CHECK29-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK29-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK29-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP14]], 1 // CHECK29-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4 -// CHECK29-NEXT: br label [[OMP_INNER_FOR_COND5]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK29-NEXT: br label [[OMP_INNER_FOR_COND5]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK29: omp.inner.for.end18: // CHECK29-NEXT: br label [[OMP_IF_END]] // CHECK29: omp_if.end: // CHECK29-NEXT: store i32 123, ptr [[I]], align 4 // CHECK29-NEXT: [[A19:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK29-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds [123 x i32], ptr [[A19]], i64 0, i64 0 -// CHECK29-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX20]], align 4 +// CHECK29-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX20]], align 4, !noundef [[NOUNDEF2]] // CHECK29-NEXT: ret 
i32 [[TMP15]] // // @@ -2953,80 +2953,80 @@ // CHECK31-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 // CHECK31-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 // CHECK31-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 1 -// CHECK31-NEXT: [[TMP0:%.*]] = load float, ptr [[B]], align 4 +// CHECK31-NEXT: [[TMP0:%.*]] = load float, ptr [[B]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK31-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP0]], 0.000000e+00 // CHECK31-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK31-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 // CHECK31-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK31-NEXT: store i32 122, ptr [[DOTOMP_UB]], align 4 -// CHECK31-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK31-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK31-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 -// CHECK31-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK31-NEXT: [[TMP2:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK31-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP2]] to i1 // CHECK31-NEXT: br i1 [[TOBOOL2]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK31: omp_if.then: // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK31: omp.inner.for.cond: -// CHECK31-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]] -// CHECK31-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK31-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK31-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK31-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP3]], [[TMP4]] // CHECK31-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK31: omp.inner.for.body: -// CHECK31-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK31-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK31-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP5]], 1 // CHECK31-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK31-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK31-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK31-NEXT: [[B3:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 1 -// CHECK31-NEXT: [[TMP6:%.*]] = load float, ptr [[B3]], align 4, !nontemporal !4, !llvm.access.group [[ACC_GRP3]] +// CHECK31-NEXT: [[TMP6:%.*]] = load float, ptr [[B3]], align 4, !nontemporal !5, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK31-NEXT: [[CONV:%.*]] = fptosi float [[TMP6]] to i32 // CHECK31-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 -// CHECK31-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK31-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK31-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], ptr [[A]], i32 0, i32 [[TMP7]] -// CHECK31-NEXT: store i32 [[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK31-NEXT: store i32 
[[CONV]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK31-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK31: omp.body.continue: // CHECK31-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK31: omp.inner.for.inc: -// CHECK31-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK31-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK31-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK31-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK31-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK31-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK31-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] // CHECK31: omp.inner.for.end: // CHECK31-NEXT: br label [[OMP_IF_END:%.*]] // CHECK31: omp_if.else: // CHECK31-NEXT: br label [[OMP_INNER_FOR_COND5:%.*]] // CHECK31: omp.inner.for.cond5: -// CHECK31-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK31-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK31-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK31-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK31-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK31-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY7:%.*]], label [[OMP_INNER_FOR_END17:%.*]] // CHECK31: omp.inner.for.body7: -// CHECK31-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK31-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK31-NEXT: [[MUL8:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK31-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]] // CHECK31-NEXT: store i32 [[ADD9]], ptr [[I]], align 4 // CHECK31-NEXT: [[B10:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 1 -// CHECK31-NEXT: [[TMP12:%.*]] = load float, ptr [[B10]], align 4 +// CHECK31-NEXT: [[TMP12:%.*]] = load float, ptr [[B10]], align 4, !noundef [[NOUNDEF3]] // CHECK31-NEXT: [[CONV11:%.*]] = fptosi float [[TMP12]] to i32 // CHECK31-NEXT: [[A12:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 -// CHECK31-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4 +// CHECK31-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !noundef [[NOUNDEF3]] // CHECK31-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [123 x i32], ptr [[A12]], i32 0, i32 [[TMP13]] // CHECK31-NEXT: store i32 [[CONV11]], ptr [[ARRAYIDX13]], align 4 // CHECK31-NEXT: br label [[OMP_BODY_CONTINUE14:%.*]] // CHECK31: omp.body.continue14: // CHECK31-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]] // CHECK31: omp.inner.for.inc15: -// CHECK31-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK31-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK31-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP14]], 1 // CHECK31-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4 -// CHECK31-NEXT: br label [[OMP_INNER_FOR_COND5]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK31-NEXT: br label [[OMP_INNER_FOR_COND5]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK31: omp.inner.for.end17: // CHECK31-NEXT: br label [[OMP_IF_END]] // CHECK31: omp_if.end: // CHECK31-NEXT: store i32 123, ptr [[I]], align 4 // CHECK31-NEXT: [[A18:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK31-NEXT: 
[[ARRAYIDX19:%.*]] = getelementptr inbounds [123 x i32], ptr [[A18]], i32 0, i32 0 -// CHECK31-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX19]], align 4 +// CHECK31-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX19]], align 4, !noundef [[NOUNDEF3]] // CHECK31-NEXT: ret i32 [[TMP15]] // // @@ -3051,15 +3051,15 @@ // CHECK33-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK33-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK33-NEXT: store i32 100, ptr [[N]], align 4 -// CHECK33-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK33-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK33-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK33-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK33-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK33-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK33-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 -// CHECK33-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4 +// CHECK33-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4 -// CHECK33-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8 +// CHECK33-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4 // CHECK33-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes, i64 24, i1 false) // CHECK33-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 @@ -3085,14 +3085,14 @@ // CHECK33-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK33-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK33-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK33-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4 +// CHECK33-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK33-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK33-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP20]], 0 // CHECK33-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK33-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK33-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK33-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK33-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], 1 // CHECK33-NEXT: [[TMP22:%.*]] = zext i32 [[ADD]] to i64 // CHECK33-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -3121,7 +3121,7 @@ // CHECK33-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192(i64 [[TMP4]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK33-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK33: omp_offload.cont: -// CHECK33-NEXT: [[TMP34:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK33-NEXT: [[TMP34:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP34]]) // CHECK33-NEXT: store i32 [[CALL]], ptr 
[[RETVAL]], align 4 // CHECK33-NEXT: [[TMP35:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 @@ -3139,7 +3139,7 @@ // CHECK33-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 // CHECK33-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK33-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK33-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK33-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 // CHECK33-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined., ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]]) // CHECK33-NEXT: ret void @@ -3169,77 +3169,77 @@ // CHECK33-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK33-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK33-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8 -// CHECK33-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK33-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK33-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK33-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK33-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK33-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 // CHECK33-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK33-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK33-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK33-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK33-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK33-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] // CHECK33-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK33: omp.precond.then: // CHECK33-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK33-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK33-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK33-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK33-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK33-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK33-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK33-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK33-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK33-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK33-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] +// CHECK33-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK33-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK33: cond.true: -// 
CHECK33-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK33-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: br label [[COND_END:%.*]] // CHECK33: cond.false: -// CHECK33-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK33-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: br label [[COND_END]] // CHECK33: cond.end: // CHECK33-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK33-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK33-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK33-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 // CHECK33-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK33: omp.inner.for.cond: -// CHECK33-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]] -// CHECK33-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK33-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK33-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK33-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK33: omp.inner.for.body: -// CHECK33-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK33-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 // CHECK33-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK33-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP5]] -// CHECK33-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK33-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK33-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64 // CHECK33-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]] -// CHECK33-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK33-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK33-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK33: omp.body.continue: // CHECK33-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK33: omp.inner.for.inc: -// CHECK33-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK33-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK33-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] -// CHECK33-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK33-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK33-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK33: omp.inner.for.end: // CHECK33-NEXT: 
br label [[OMP_LOOP_EXIT:%.*]] // CHECK33: omp.loop.exit: // CHECK33-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK33-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4 +// CHECK33-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP20]]) -// CHECK33-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK33-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 // CHECK33-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK33: .omp.final.then: -// CHECK33-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK33-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP23]], 0 // CHECK33-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1 // CHECK33-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1 @@ -3268,12 +3268,12 @@ // CHECK33-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK33-NEXT: store i32 0, ptr [[TE]], align 4 // CHECK33-NEXT: store i32 128, ptr [[TH]], align 4 -// CHECK33-NEXT: [[TMP0:%.*]] = load i32, ptr [[TE]], align 4 +// CHECK33-NEXT: [[TMP0:%.*]] = load i32, ptr [[TE]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: store i32 [[TMP0]], ptr [[TE_CASTED]], align 4 -// CHECK33-NEXT: [[TMP1:%.*]] = load i64, ptr [[TE_CASTED]], align 8 -// CHECK33-NEXT: [[TMP2:%.*]] = load i32, ptr [[TH]], align 4 +// CHECK33-NEXT: [[TMP1:%.*]] = load i64, ptr [[TE_CASTED]], align 8, !noundef [[NOUNDEF5]] +// CHECK33-NEXT: [[TMP2:%.*]] = load i32, ptr [[TH]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: store i32 [[TMP2]], ptr [[TH_CASTED]], align 4 -// CHECK33-NEXT: [[TMP3:%.*]] = load i64, ptr [[TH_CASTED]], align 8 +// CHECK33-NEXT: [[TMP3:%.*]] = load i64, ptr [[TH_CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK33-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8 // CHECK33-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -3294,7 +3294,7 @@ // CHECK33-NEXT: store ptr null, ptr [[TMP12]], align 8 // CHECK33-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK33-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK33-NEXT: [[TMP15:%.*]] = load i32, ptr [[TE]], align 4 +// CHECK33-NEXT: [[TMP15:%.*]] = load i32, ptr [[TE]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK33-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 // CHECK33-NEXT: store i32 1, ptr [[TMP16]], align 4 @@ -3335,8 +3335,8 @@ // CHECK33-NEXT: store i64 [[TH]], ptr [[TH_ADDR]], align 8 // CHECK33-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK33-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK33-NEXT: [[TMP2:%.*]] = load i32, ptr [[TE_ADDR]], align 4 -// CHECK33-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_ADDR]], align 4 +// CHECK33-NEXT: [[TMP2:%.*]] = load i32, ptr [[TE_ADDR]], align 4, !noundef [[NOUNDEF5]] +// CHECK33-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: 
call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP2]], i32 [[TMP3]]) // CHECK33-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..1, ptr [[TMP1]]) // CHECK33-NEXT: ret void @@ -3364,49 +3364,49 @@ // CHECK33-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK33-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK33-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK33-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK33-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK33-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK33-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK33-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK33: cond.true: // CHECK33-NEXT: br label [[COND_END:%.*]] // CHECK33: cond.false: -// CHECK33-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK33-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: br label [[COND_END]] // CHECK33: cond.end: // CHECK33-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK33-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK33-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK33-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK33-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK33: omp.inner.for.cond: -// CHECK33-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]] -// CHECK33-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK33-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK33-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK33-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK33: omp.inner.for.body: -// CHECK33-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK33-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK33-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK33-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] -// CHECK33-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK33-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK33-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64 // CHECK33-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]] -// CHECK33-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group 
[[ACC_GRP11]] +// CHECK33-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK33-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK33: omp.body.continue: // CHECK33-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK33: omp.inner.for.inc: -// CHECK33-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] +// CHECK33-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK33-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] -// CHECK33-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK33-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK33-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK33: omp.inner.for.end: // CHECK33-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK33: omp.loop.exit: // CHECK33-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK33-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK33-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK33-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK33-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK33: .omp.final.then: @@ -3444,14 +3444,14 @@ // CHECK35-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK35-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK35-NEXT: store i32 100, ptr [[N]], align 4 -// CHECK35-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK35-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6:![0-9]+]] // CHECK35-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() // CHECK35-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK35-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK35-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 -// CHECK35-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4 +// CHECK35-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: store i32 [[TMP2]], ptr [[N_CASTED]], align 4 -// CHECK35-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_CASTED]], align 4 +// CHECK35-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4 // CHECK35-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64 // CHECK35-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes, i32 24, i1 false) @@ -3478,14 +3478,14 @@ // CHECK35-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK35-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK35-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK35-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4 +// CHECK35-NEXT: [[TMP19:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: store i32 [[TMP19]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK35-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK35-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP20]], 0 // CHECK35-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // 
CHECK35-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK35-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK35-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK35-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], 1 // CHECK35-NEXT: [[TMP22:%.*]] = zext i32 [[ADD]] to i64 // CHECK35-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -3514,7 +3514,7 @@ // CHECK35-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192(i32 [[TMP3]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK35-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK35: omp_offload.cont: -// CHECK35-NEXT: [[TMP34:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK35-NEXT: [[TMP34:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP34]]) // CHECK35-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK35-NEXT: [[TMP35:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 @@ -3532,7 +3532,7 @@ // CHECK35-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK35-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK35-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 -// CHECK35-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK35-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4 // CHECK35-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 3, ptr @.omp_outlined., ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]]) // CHECK35-NEXT: ret void @@ -3562,76 +3562,76 @@ // CHECK35-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK35-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK35-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4 -// CHECK35-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK35-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK35-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK35-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK35-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK35-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 // CHECK35-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK35-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK35-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK35-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK35-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK35-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] // CHECK35-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK35: omp.precond.then: // CHECK35-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK35-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK35-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK35-NEXT: store i32 1, 
ptr [[DOTOMP_STRIDE]], align 4 // CHECK35-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK35-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK35-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK35-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK35-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK35-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK35-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] +// CHECK35-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK35-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK35: cond.true: -// CHECK35-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK35-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: br label [[COND_END:%.*]] // CHECK35: cond.false: -// CHECK35-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK35-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: br label [[COND_END]] // CHECK35: cond.end: // CHECK35-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK35-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK35-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK35-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 // CHECK35-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK35: omp.inner.for.cond: -// CHECK35-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]] -// CHECK35-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK35-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK35-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK35-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK35: omp.inner.for.body: -// CHECK35-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK35-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 // CHECK35-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK35-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK35-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK35-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK35-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP17]] -// CHECK35-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, 
!llvm.access.group [[ACC_GRP6]] +// CHECK35-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK35-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK35: omp.body.continue: // CHECK35-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK35: omp.inner.for.inc: -// CHECK35-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK35-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 -// CHECK35-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK35-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK35-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK35-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK35: omp.inner.for.end: // CHECK35-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK35: omp.loop.exit: // CHECK35-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK35-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4 +// CHECK35-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP20]]) -// CHECK35-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK35-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 // CHECK35-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK35: .omp.final.then: -// CHECK35-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK35-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP23]], 0 // CHECK35-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1 // CHECK35-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1 @@ -3660,12 +3660,12 @@ // CHECK35-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK35-NEXT: store i32 0, ptr [[TE]], align 4 // CHECK35-NEXT: store i32 128, ptr [[TH]], align 4 -// CHECK35-NEXT: [[TMP0:%.*]] = load i32, ptr [[TE]], align 4 +// CHECK35-NEXT: [[TMP0:%.*]] = load i32, ptr [[TE]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: store i32 [[TMP0]], ptr [[TE_CASTED]], align 4 -// CHECK35-NEXT: [[TMP1:%.*]] = load i32, ptr [[TE_CASTED]], align 4 -// CHECK35-NEXT: [[TMP2:%.*]] = load i32, ptr [[TH]], align 4 +// CHECK35-NEXT: [[TMP1:%.*]] = load i32, ptr [[TE_CASTED]], align 4, !noundef [[NOUNDEF6]] +// CHECK35-NEXT: [[TMP2:%.*]] = load i32, ptr [[TH]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: store i32 [[TMP2]], ptr [[TH_CASTED]], align 4 -// CHECK35-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_CASTED]], align 4 +// CHECK35-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK35-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4 // CHECK35-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -3686,7 +3686,7 @@ // CHECK35-NEXT: store ptr null, ptr [[TMP12]], align 4 // CHECK35-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK35-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], 
i32 0, i32 0 -// CHECK35-NEXT: [[TMP15:%.*]] = load i32, ptr [[TE]], align 4 +// CHECK35-NEXT: [[TMP15:%.*]] = load i32, ptr [[TE]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK35-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 // CHECK35-NEXT: store i32 1, ptr [[TMP16]], align 4 @@ -3727,8 +3727,8 @@ // CHECK35-NEXT: store i32 [[TH]], ptr [[TH_ADDR]], align 4 // CHECK35-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK35-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK35-NEXT: [[TMP2:%.*]] = load i32, ptr [[TE_ADDR]], align 4 -// CHECK35-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_ADDR]], align 4 +// CHECK35-NEXT: [[TMP2:%.*]] = load i32, ptr [[TE_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK35-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP2]], i32 [[TMP3]]) // CHECK35-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..1, ptr [[TMP1]]) // CHECK35-NEXT: ret void @@ -3756,48 +3756,48 @@ // CHECK35-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK35-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK35-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK35-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK35-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK35-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK35-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK35-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK35: cond.true: // CHECK35-NEXT: br label [[COND_END:%.*]] // CHECK35: cond.false: -// CHECK35-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK35-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: br label [[COND_END]] // CHECK35: cond.end: // CHECK35-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK35-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK35-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK35-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK35-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK35: omp.inner.for.cond: -// CHECK35-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] -// CHECK35-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK35-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK35-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK35-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK35: omp.inner.for.body: -// 
CHECK35-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK35-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK35-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK35-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK35-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK35-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK35-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP9]] -// CHECK35-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK35-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK35-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK35: omp.body.continue: // CHECK35-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK35: omp.inner.for.inc: -// CHECK35-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK35-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK35-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] -// CHECK35-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK35-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK35-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK35: omp.inner.for.end: // CHECK35-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK35: omp.loop.exit: // CHECK35-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK35-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK35-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK35-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK35-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK35: .omp.final.then: @@ -3836,18 +3836,18 @@ // CHECK37-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK37-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK37-NEXT: store i32 100, ptr [[N]], align 4 -// CHECK37-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK37-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK37-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK37-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK37-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK37-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK37-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 -// CHECK37-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK37-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: store i32 [[TMP3]], ptr [[ARGC_CASTED]], align 4 -// CHECK37-NEXT: [[TMP4:%.*]] = load i64, ptr [[ARGC_CASTED]], align 8 -// CHECK37-NEXT: [[TMP5:%.*]] = load i32, ptr [[N]], align 4 +// CHECK37-NEXT: [[TMP4:%.*]] = load i64, ptr [[ARGC_CASTED]], align 8, !noundef [[NOUNDEF5]] +// CHECK37-NEXT: [[TMP5:%.*]] = load i32, 
ptr [[N]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: store i32 [[TMP5]], ptr [[N_CASTED]], align 4 -// CHECK37-NEXT: [[TMP6:%.*]] = load i64, ptr [[N_CASTED]], align 8 +// CHECK37-NEXT: [[TMP6:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP1]], 4 // CHECK37-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes, i64 32, i1 false) // CHECK37-NEXT: [[TMP8:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 @@ -3879,14 +3879,14 @@ // CHECK37-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK37-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK37-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK37-NEXT: [[TMP24:%.*]] = load i32, ptr [[N]], align 4 +// CHECK37-NEXT: [[TMP24:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: store i32 [[TMP24]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK37-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK37-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP25]], 0 // CHECK37-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK37-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK37-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK37-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK37-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP26]], 1 // CHECK37-NEXT: [[TMP27:%.*]] = zext i32 [[ADD]] to i64 // CHECK37-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -3915,7 +3915,7 @@ // CHECK37-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192(i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP1]], ptr [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK37-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK37: omp_offload.cont: -// CHECK37-NEXT: [[TMP39:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK37-NEXT: [[TMP39:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP39]]) // CHECK37-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK37-NEXT: [[TMP40:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 @@ -3937,17 +3937,17 @@ // CHECK37-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8 // CHECK37-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK37-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK37-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK37-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK37-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK37-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK37-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK37-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK37-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK37-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF5]] // 
CHECK37-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP3]] to i1 // CHECK37-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL1]] to i8 // CHECK37-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK37-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK37-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK37-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined., ptr [[N_ADDR]], i64 [[TMP0]], ptr [[TMP1]], i64 [[TMP4]]) // CHECK37-NEXT: ret void // @@ -3978,85 +3978,85 @@ // CHECK37-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK37-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK37-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8 -// CHECK37-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 +// CHECK37-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK37-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK37-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK37-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK37-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 // CHECK37-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK37-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK37-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4 // CHECK37-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK37-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK37-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] // CHECK37-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK37: omp.precond.then: // CHECK37-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK37-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK37-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK37-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK37-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK37-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK37-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK37-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK37-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK37-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK37-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] +// CHECK37-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK37-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK37: cond.true: -// CHECK37-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// 
CHECK37-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: br label [[COND_END:%.*]] // CHECK37: cond.false: -// CHECK37-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK37-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: br label [[COND_END]] // CHECK37: cond.end: // CHECK37-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK37-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK37-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK37-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK37-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK37-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP14]] to i1 // CHECK37-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK37: omp_if.then: // CHECK37-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK37: omp.inner.for.cond: -// CHECK37-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]] -// CHECK37-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK37-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK37-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] // CHECK37-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK37: omp.inner.for.body: -// CHECK37-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK37-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1 // CHECK37-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK37-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP5]] -// CHECK37-NEXT: [[TMP18:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK37-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK37-NEXT: [[TMP18:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64 // CHECK37-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]] -// CHECK37-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK37-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK37-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK37: omp.body.continue: // CHECK37-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK37: omp.inner.for.inc: -// CHECK37-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK37-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK37-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] -// CHECK37-NEXT: br 
label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK37-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK37-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK37: omp.inner.for.end: // CHECK37-NEXT: br label [[OMP_IF_END:%.*]] // CHECK37: omp_if.else: // CHECK37-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] // CHECK37: omp.inner.for.cond8: -// CHECK37-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK37-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK37-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF5]] +// CHECK37-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] // CHECK37-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END18:%.*]] // CHECK37: omp.inner.for.body10: -// CHECK37-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK37-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP22]], 1 // CHECK37-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]] // CHECK37-NEXT: store i32 [[ADD12]], ptr [[I4]], align 4 -// CHECK37-NEXT: [[TMP23:%.*]] = load i32, ptr [[I4]], align 4 +// CHECK37-NEXT: [[TMP23:%.*]] = load i32, ptr [[I4]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[IDXPROM13:%.*]] = sext i32 [[TMP23]] to i64 // CHECK37-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM13]] // CHECK37-NEXT: store i32 0, ptr [[ARRAYIDX14]], align 4 @@ -4064,23 +4064,23 @@ // CHECK37: omp.body.continue15: // CHECK37-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]] // CHECK37: omp.inner.for.inc16: -// CHECK37-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK37-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP24]], 1 // CHECK37-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4 -// CHECK37-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK37-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP10:![0-9]+]] // CHECK37: omp.inner.for.end18: // CHECK37-NEXT: br label [[OMP_IF_END]] // CHECK37: omp_if.end: // CHECK37-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK37: omp.loop.exit: // CHECK37-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK37-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK37-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK37-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK37-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK37-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK37: .omp.final.then: -// CHECK37-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK37-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP29]], 0 // CHECK37-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1 // CHECK37-NEXT: [[MUL21:%.*]] = mul nsw i32 [[DIV20]], 1 @@ -4109,12 +4109,12 @@ // CHECK37-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 
// CHECK37-NEXT: store i32 0, ptr [[TE]], align 4 // CHECK37-NEXT: store i32 128, ptr [[TH]], align 4 -// CHECK37-NEXT: [[TMP0:%.*]] = load i32, ptr [[TE]], align 4 +// CHECK37-NEXT: [[TMP0:%.*]] = load i32, ptr [[TE]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: store i32 [[TMP0]], ptr [[TE_CASTED]], align 4 -// CHECK37-NEXT: [[TMP1:%.*]] = load i64, ptr [[TE_CASTED]], align 8 -// CHECK37-NEXT: [[TMP2:%.*]] = load i32, ptr [[TH]], align 4 +// CHECK37-NEXT: [[TMP1:%.*]] = load i64, ptr [[TE_CASTED]], align 8, !noundef [[NOUNDEF5]] +// CHECK37-NEXT: [[TMP2:%.*]] = load i32, ptr [[TH]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: store i32 [[TMP2]], ptr [[TH_CASTED]], align 4 -// CHECK37-NEXT: [[TMP3:%.*]] = load i64, ptr [[TH_CASTED]], align 8 +// CHECK37-NEXT: [[TMP3:%.*]] = load i64, ptr [[TH_CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK37-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8 // CHECK37-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -4135,7 +4135,7 @@ // CHECK37-NEXT: store ptr null, ptr [[TMP12]], align 8 // CHECK37-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK37-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK37-NEXT: [[TMP15:%.*]] = load i32, ptr [[TE]], align 4 +// CHECK37-NEXT: [[TMP15:%.*]] = load i32, ptr [[TE]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK37-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 // CHECK37-NEXT: store i32 1, ptr [[TMP16]], align 4 @@ -4176,8 +4176,8 @@ // CHECK37-NEXT: store i64 [[TH]], ptr [[TH_ADDR]], align 8 // CHECK37-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK37-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK37-NEXT: [[TMP2:%.*]] = load i32, ptr [[TE_ADDR]], align 4 -// CHECK37-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_ADDR]], align 4 +// CHECK37-NEXT: [[TMP2:%.*]] = load i32, ptr [[TE_ADDR]], align 4, !noundef [[NOUNDEF5]] +// CHECK37-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP2]], i32 [[TMP3]]) // CHECK37-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..1, ptr [[TMP1]]) // CHECK37-NEXT: ret void @@ -4205,49 +4205,49 @@ // CHECK37-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK37-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK37-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK37-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK37-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK37-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK37-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK37-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK37: cond.true: // CHECK37-NEXT: br label [[COND_END:%.*]] // CHECK37: cond.false: -// CHECK37-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK37-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: br label [[COND_END]] // CHECK37: cond.end: // CHECK37-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK37-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK37-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK37-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK37-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK37: omp.inner.for.cond: -// CHECK37-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]] -// CHECK37-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK37-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK37-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK37-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK37: omp.inner.for.body: -// CHECK37-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK37-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK37-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK37-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK37-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK37-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK37-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64 // CHECK37-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]] -// CHECK37-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK37-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP14]] // CHECK37-NEXT: br label 
[[OMP_BODY_CONTINUE:%.*]] // CHECK37: omp.body.continue: // CHECK37-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK37: omp.inner.for.inc: -// CHECK37-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK37-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]], !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK37-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] -// CHECK37-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] +// CHECK37-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK37-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] // CHECK37: omp.inner.for.end: // CHECK37-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK37: omp.loop.exit: // CHECK37-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK37-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK37-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK37-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK37-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK37: .omp.final.then: @@ -4286,17 +4286,17 @@ // CHECK39-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK39-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK39-NEXT: store i32 100, ptr [[N]], align 4 -// CHECK39-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK39-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6:![0-9]+]] // CHECK39-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() // CHECK39-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK39-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK39-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 -// CHECK39-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK39-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: store i32 [[TMP2]], ptr [[ARGC_CASTED]], align 4 -// CHECK39-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARGC_CASTED]], align 4 -// CHECK39-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4 +// CHECK39-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARGC_CASTED]], align 4, !noundef [[NOUNDEF6]] +// CHECK39-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: store i32 [[TMP4]], ptr [[N_CASTED]], align 4 -// CHECK39-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_CASTED]], align 4 +// CHECK39-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[TMP6:%.*]] = mul nuw i32 [[TMP0]], 4 // CHECK39-NEXT: [[TMP7:%.*]] = sext i32 [[TMP6]] to i64 // CHECK39-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes, i32 32, i1 false) @@ -4329,14 +4329,14 @@ // CHECK39-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK39-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK39-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK39-NEXT: [[TMP24:%.*]] = load i32, ptr [[N]], align 4 +// CHECK39-NEXT: [[TMP24:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: store i32 [[TMP24]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK39-NEXT: 
[[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK39-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP25]], 0 // CHECK39-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK39-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK39-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK39-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK39-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP26]], 1 // CHECK39-NEXT: [[TMP27:%.*]] = zext i32 [[ADD]] to i64 // CHECK39-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 @@ -4365,7 +4365,7 @@ // CHECK39-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192(i32 [[TMP3]], i32 [[TMP5]], i32 [[TMP0]], ptr [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK39-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK39: omp_offload.cont: -// CHECK39-NEXT: [[TMP39:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK39-NEXT: [[TMP39:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP39]]) // CHECK39-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK39-NEXT: [[TMP40:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 @@ -4387,17 +4387,17 @@ // CHECK39-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4 // CHECK39-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK39-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 -// CHECK39-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK39-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK39-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK39-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK39-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK39-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK39-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK39-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP3]] to i1 // CHECK39-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL1]] to i8 // CHECK39-NEXT: store i8 [[FROMBOOL2]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1 -// CHECK39-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK39-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 4, ptr @.omp_outlined., ptr [[N_ADDR]], i32 [[TMP0]], ptr [[TMP1]], i32 [[TMP4]]) // CHECK39-NEXT: ret void // @@ -4428,107 +4428,107 @@ // CHECK39-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK39-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK39-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4 -// CHECK39-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 +// CHECK39-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK39-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK39-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK39-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK39-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 // CHECK39-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK39-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK39-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4 // CHECK39-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK39-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK39-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] // CHECK39-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK39: omp.precond.then: // CHECK39-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK39-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK39-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK39-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK39-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK39-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK39-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4 +// CHECK39-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK39-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 -// CHECK39-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK39-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] +// CHECK39-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] // CHECK39-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK39: cond.true: -// CHECK39-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK39-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: br label [[COND_END:%.*]] // CHECK39: cond.false: -// CHECK39-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK39-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: br label [[COND_END]] // CHECK39: cond.end: // CHECK39-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], 
[[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK39-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK39-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK39-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4 -// CHECK39-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1 +// CHECK39-NEXT: [[TMP14:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP14]] to i1 // CHECK39-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK39: omp_if.then: // CHECK39-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK39: omp.inner.for.cond: -// CHECK39-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]] -// CHECK39-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK39-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK39-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] // CHECK39-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK39: omp.inner.for.body: -// CHECK39-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK39-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP17]], 1 // CHECK39-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK39-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK39-NEXT: [[TMP18:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK39-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK39-NEXT: [[TMP18:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP18]] -// CHECK39-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK39-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK39-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK39: omp.body.continue: // CHECK39-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK39: omp.inner.for.inc: -// CHECK39-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK39-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1 -// CHECK39-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK39-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK39-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK39-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK39: omp.inner.for.end: // CHECK39-NEXT: br label [[OMP_IF_END:%.*]] // CHECK39: omp_if.else: // CHECK39-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] // CHECK39: omp.inner.for.cond8: -// CHECK39-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// 
CHECK39-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK39-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF6]] +// CHECK39-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] // CHECK39-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END17:%.*]] // CHECK39: omp.inner.for.body10: -// CHECK39-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK39-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP22]], 1 // CHECK39-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]] // CHECK39-NEXT: store i32 [[ADD12]], ptr [[I4]], align 4 -// CHECK39-NEXT: [[TMP23:%.*]] = load i32, ptr [[I4]], align 4 +// CHECK39-NEXT: [[TMP23:%.*]] = load i32, ptr [[I4]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 [[TMP23]] // CHECK39-NEXT: store i32 0, ptr [[ARRAYIDX13]], align 4 // CHECK39-NEXT: br label [[OMP_BODY_CONTINUE14:%.*]] // CHECK39: omp.body.continue14: // CHECK39-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]] // CHECK39: omp.inner.for.inc15: -// CHECK39-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK39-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP24]], 1 // CHECK39-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4 -// CHECK39-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK39-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK39: omp.inner.for.end17: // CHECK39-NEXT: br label [[OMP_IF_END]] // CHECK39: omp_if.end: // CHECK39-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK39: omp.loop.exit: // CHECK39-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK39-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4 +// CHECK39-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP26]]) -// CHECK39-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK39-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 // CHECK39-NEXT: br i1 [[TMP28]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK39: .omp.final.then: -// CHECK39-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK39-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[SUB18:%.*]] = sub nsw i32 [[TMP29]], 0 // CHECK39-NEXT: [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1 // CHECK39-NEXT: [[MUL20:%.*]] = mul nsw i32 [[DIV19]], 1 @@ -4557,12 +4557,12 @@ // CHECK39-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK39-NEXT: store i32 0, ptr [[TE]], align 4 // CHECK39-NEXT: store i32 128, ptr [[TH]], align 4 -// CHECK39-NEXT: [[TMP0:%.*]] = load i32, ptr [[TE]], align 4 +// CHECK39-NEXT: [[TMP0:%.*]] = load i32, ptr [[TE]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: store i32 [[TMP0]], ptr [[TE_CASTED]], align 4 -// CHECK39-NEXT: [[TMP1:%.*]] = load i32, ptr [[TE_CASTED]], align 4 -// CHECK39-NEXT: [[TMP2:%.*]] = load i32, ptr [[TH]], align 4 +// CHECK39-NEXT: [[TMP1:%.*]] = load i32, ptr [[TE_CASTED]], align 4, !noundef 
[[NOUNDEF6]] +// CHECK39-NEXT: [[TMP2:%.*]] = load i32, ptr [[TH]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: store i32 [[TMP2]], ptr [[TH_CASTED]], align 4 -// CHECK39-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_CASTED]], align 4 +// CHECK39-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK39-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4 // CHECK39-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 @@ -4583,7 +4583,7 @@ // CHECK39-NEXT: store ptr null, ptr [[TMP12]], align 4 // CHECK39-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK39-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK39-NEXT: [[TMP15:%.*]] = load i32, ptr [[TE]], align 4 +// CHECK39-NEXT: [[TMP15:%.*]] = load i32, ptr [[TE]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK39-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 // CHECK39-NEXT: store i32 1, ptr [[TMP16]], align 4 @@ -4624,8 +4624,8 @@ // CHECK39-NEXT: store i32 [[TH]], ptr [[TH_ADDR]], align 4 // CHECK39-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK39-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK39-NEXT: [[TMP2:%.*]] = load i32, ptr [[TE_ADDR]], align 4 -// CHECK39-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_ADDR]], align 4 +// CHECK39-NEXT: [[TMP2:%.*]] = load i32, ptr [[TE_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK39-NEXT: [[TMP3:%.*]] = load i32, ptr [[TH_ADDR]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB2]], i32 [[TMP0]], i32 [[TMP2]], i32 [[TMP3]]) // CHECK39-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined..1, ptr [[TMP1]]) // CHECK39-NEXT: ret void @@ -4653,48 +4653,48 @@ // CHECK39-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK39-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK39-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK39-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK39-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK39-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK39-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 // CHECK39-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK39: cond.true: // CHECK39-NEXT: br label [[COND_END:%.*]] // CHECK39: cond.false: -// CHECK39-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK39-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: br label [[COND_END]] // CHECK39: cond.end: // CHECK39-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK39-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK39-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK39-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK39-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK39: omp.inner.for.cond: -// CHECK39-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14:![0-9]+]] -// CHECK39-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK39-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK39-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK39-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK39: omp.inner.for.body: -// CHECK39-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK39-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK39-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK39-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP14]] -// CHECK39-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK39-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK39-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP9]] -// CHECK39-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK39-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK39-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK39: omp.body.continue: // CHECK39-NEXT: br 
label [[OMP_INNER_FOR_INC:%.*]] // CHECK39: omp.inner.for.inc: -// CHECK39-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] +// CHECK39-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]], !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 -// CHECK39-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP14]] -// CHECK39-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP15:![0-9]+]] +// CHECK39-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP15]] +// CHECK39-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK39: omp.inner.for.end: // CHECK39-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK39: omp.loop.exit: // CHECK39-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK39-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK39-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK39-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK39-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK39: .omp.final.then: @@ -4732,54 +4732,54 @@ // CHECK41-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK41-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK41-NEXT: store i32 100, ptr [[N]], align 4 -// CHECK41-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK41-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK41-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK41-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK41-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK41-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK41-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 -// CHECK41-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4 +// CHECK41-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF2]] // CHECK41-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK41-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK41-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK41-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 // CHECK41-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK41-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK41-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK41-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK41-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK41-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] // CHECK41-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4 // CHECK41-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK41-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK41-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK41-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]] // CHECK41-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK41: simd.if.then: -// CHECK41-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK41-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK41-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 // CHECK41-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK41: 
omp.inner.for.cond: -// CHECK41-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK41-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK41-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK41-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK41-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] // CHECK41-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK41: omp.inner.for.body: -// CHECK41-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK41-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK41-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 // CHECK41-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK41-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK41-NEXT: [[TMP11:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK41-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK41-NEXT: [[TMP11:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK41-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 // CHECK41-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[IDXPROM]] -// CHECK41-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK41-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK41-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK41: omp.body.continue: // CHECK41-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK41: omp.inner.for.inc: -// CHECK41-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK41-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK41-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK41-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK41-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK41-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK41-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK41: omp.inner.for.end: -// CHECK41-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK41-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK41-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP13]], 0 // CHECK41-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK41-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1 @@ -4787,7 +4787,7 @@ // CHECK41-NEXT: store i32 [[ADD9]], ptr [[I3]], align 4 // CHECK41-NEXT: br label [[SIMD_IF_END]] // CHECK41: simd.if.end: -// CHECK41-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK41-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK41-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP14]]) // CHECK41-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK41-NEXT: [[TMP15:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 @@ -4813,31 +4813,31 @@ // CHECK41-NEXT: store i32 
128, ptr [[TH]], align 4 // CHECK41-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK41-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK41-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK41-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK41-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK41-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK41: omp.inner.for.cond: -// CHECK41-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]] -// CHECK41-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK41-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK41-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK41-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK41-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK41: omp.inner.for.body: -// CHECK41-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK41-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK41-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK41-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK41-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK41-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK41-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK41-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK41-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64 // CHECK41-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK41-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK41-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK41-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK41: omp.body.continue: // CHECK41-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK41: omp.inner.for.inc: -// CHECK41-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK41-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK41-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK41-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] -// CHECK41-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK41-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK41-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK41: omp.inner.for.end: // CHECK41-NEXT: store i32 10, ptr [[I]], align 4 // CHECK41-NEXT: ret i32 0 @@ -4864,52 +4864,52 @@ // CHECK43-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK43-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK43-NEXT: store i32 100, ptr [[N]], align 4 -// CHECK43-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK43-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK43-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() // CHECK43-NEXT: store ptr 
[[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK43-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK43-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 -// CHECK43-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4 +// CHECK43-NEXT: [[TMP2:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF3]] // CHECK43-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK43-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK43-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK43-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0 // CHECK43-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK43-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK43-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4 // CHECK43-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK43-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK43-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK43-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4 // CHECK43-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK43-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK43-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK43-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] // CHECK43-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK43: simd.if.then: -// CHECK43-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK43-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK43-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4 // CHECK43-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK43: omp.inner.for.cond: -// CHECK43-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]] -// CHECK43-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK43-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK43-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK43-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK43-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK43: omp.inner.for.body: -// CHECK43-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK43-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK43-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 // CHECK43-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK43-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK43-NEXT: [[TMP10:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK43-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK43-NEXT: [[TMP10:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK43-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP10]] -// CHECK43-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK43-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK43-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK43: 
omp.body.continue: // CHECK43-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK43: omp.inner.for.inc: -// CHECK43-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK43-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK43-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK43-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK43-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK43-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK43-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK43: omp.inner.for.end: -// CHECK43-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK43-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK43-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP12]], 0 // CHECK43-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK43-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1 @@ -4917,7 +4917,7 @@ // CHECK43-NEXT: store i32 [[ADD9]], ptr [[I3]], align 4 // CHECK43-NEXT: br label [[SIMD_IF_END]] // CHECK43: simd.if.end: -// CHECK43-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK43-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK43-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP13]]) // CHECK43-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK43-NEXT: [[TMP14:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 @@ -4943,30 +4943,30 @@ // CHECK43-NEXT: store i32 128, ptr [[TH]], align 4 // CHECK43-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK43-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK43-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK43-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK43-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK43-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK43: omp.inner.for.cond: -// CHECK43-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]] -// CHECK43-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK43-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK43-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK43-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK43-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK43: omp.inner.for.body: -// CHECK43-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK43-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK43-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK43-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK43-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK43-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK43-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK43-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef 
[[NOUNDEF3]] // CHECK43-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i32 0, i32 [[TMP4]] -// CHECK43-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK43-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK43-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK43: omp.body.continue: // CHECK43-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK43: omp.inner.for.inc: -// CHECK43-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK43-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK43-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK43-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] -// CHECK43-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK43-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK43-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK43: omp.inner.for.end: // CHECK43-NEXT: store i32 10, ptr [[I]], align 4 // CHECK43-NEXT: ret i32 0 @@ -4994,75 +4994,75 @@ // CHECK45-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK45-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK45-NEXT: store i32 100, ptr [[N]], align 4 -// CHECK45-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK45-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK45-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK45-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK45-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 8 // CHECK45-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 // CHECK45-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 -// CHECK45-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK45-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0 // CHECK45-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK45-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK45-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4 +// CHECK45-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF2]] // CHECK45-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK45-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK45-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP5]], 0 // CHECK45-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK45-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK45-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4 // CHECK45-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK45-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK45-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // CHECK45-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4 // CHECK45-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK45-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK45-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP7]] // CHECK45-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK45: 
simd.if.then: -// CHECK45-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK45-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK45-NEXT: store i32 [[TMP8]], ptr [[DOTOMP_IV]], align 4 -// CHECK45-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK45-NEXT: [[TMP9:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK45-NEXT: br i1 [[TOBOOL5]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK45: omp_if.then: // CHECK45-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK45: omp.inner.for.cond: -// CHECK45-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] -// CHECK45-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK45-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK45-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP10]], [[TMP11]] // CHECK45-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK45: omp.inner.for.body: -// CHECK45-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK45-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1 // CHECK45-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK45-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK45-NEXT: [[TMP13:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK45-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK45-NEXT: [[TMP13:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 // CHECK45-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[IDXPROM]] -// CHECK45-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK45-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK45-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK45: omp.body.continue: // CHECK45-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK45: omp.inner.for.inc: -// CHECK45-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] +// CHECK45-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP14]], 1 -// CHECK45-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] -// CHECK45-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK45-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK45-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK45: omp.inner.for.end: // CHECK45-NEXT: br label [[OMP_IF_END:%.*]] // CHECK45: omp_if.else: // CHECK45-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] // CHECK45: omp.inner.for.cond8: -// CHECK45-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK45-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], 
align 4 +// CHECK45-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] +// CHECK45-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] // CHECK45-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END18:%.*]] // CHECK45: omp.inner.for.body10: -// CHECK45-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK45-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP17]], 1 // CHECK45-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]] // CHECK45-NEXT: store i32 [[ADD12]], ptr [[I4]], align 4 -// CHECK45-NEXT: [[TMP18:%.*]] = load i32, ptr [[I4]], align 4 +// CHECK45-NEXT: [[TMP18:%.*]] = load i32, ptr [[I4]], align 4, !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[IDXPROM13:%.*]] = sext i32 [[TMP18]] to i64 // CHECK45-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[IDXPROM13]] // CHECK45-NEXT: store i32 0, ptr [[ARRAYIDX14]], align 4 @@ -5070,14 +5070,14 @@ // CHECK45: omp.body.continue15: // CHECK45-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]] // CHECK45: omp.inner.for.inc16: -// CHECK45-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK45-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK45-NEXT: store i32 [[ADD17]], ptr [[DOTOMP_IV]], align 4 -// CHECK45-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK45-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK45: omp.inner.for.end18: // CHECK45-NEXT: br label [[OMP_IF_END]] // CHECK45: omp_if.end: -// CHECK45-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK45-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP20]], 0 // CHECK45-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1 // CHECK45-NEXT: [[MUL21:%.*]] = mul nsw i32 [[DIV20]], 1 @@ -5085,7 +5085,7 @@ // CHECK45-NEXT: store i32 [[ADD22]], ptr [[I4]], align 4 // CHECK45-NEXT: br label [[SIMD_IF_END]] // CHECK45: simd.if.end: -// CHECK45-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK45-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP21]]) // CHECK45-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK45-NEXT: [[TMP22:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 @@ -5111,31 +5111,31 @@ // CHECK45-NEXT: store i32 128, ptr [[TH]], align 4 // CHECK45-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK45-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK45-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK45-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK45-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK45-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK45: omp.inner.for.cond: -// CHECK45-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]] -// CHECK45-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK45-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]], !noundef [[NOUNDEF2]] 
+// CHECK45-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK45-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK45: omp.inner.for.body: -// CHECK45-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK45-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK45-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK45-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]] -// CHECK45-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK45-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK45-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64 // CHECK45-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK45-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK45-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP9]] // CHECK45-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK45: omp.body.continue: // CHECK45-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK45: omp.inner.for.inc: -// CHECK45-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK45-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]], !noundef [[NOUNDEF2]] // CHECK45-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK45-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] -// CHECK45-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] +// CHECK45-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK45-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] // CHECK45: omp.inner.for.end: // CHECK45-NEXT: store i32 10, ptr [[I]], align 4 // CHECK45-NEXT: ret i32 0 @@ -5163,87 +5163,87 @@ // CHECK47-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK47-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK47-NEXT: store i32 100, ptr [[N]], align 4 -// CHECK47-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK47-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK47-NEXT: [[TMP1:%.*]] = call ptr @llvm.stacksave() // CHECK47-NEXT: store ptr [[TMP1]], ptr [[SAVED_STACK]], align 4 // CHECK47-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 // CHECK47-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 -// CHECK47-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK47-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP2]], 0 // CHECK47-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK47-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR_]], align 1 -// CHECK47-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4 +// CHECK47-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF3]] // CHECK47-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK47-NEXT: [[TMP4:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_1]], align 4 +// CHECK47-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 // CHECK47-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK47-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK47-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2]], align 4 // CHECK47-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 -// CHECK47-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK47-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK47-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB]], align 4 // CHECK47-NEXT: store i32 0, ptr [[I]], align 4 -// CHECK47-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK47-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]] // CHECK47-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] // CHECK47: simd.if.then: -// CHECK47-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK47-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK47-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_IV]], align 4 -// CHECK47-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1 +// CHECK47-NEXT: [[TMP8:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR_]], align 1, !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP8]] to i1 // CHECK47-NEXT: br i1 [[TOBOOL5]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK47: omp_if.then: // CHECK47-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK47: omp.inner.for.cond: -// CHECK47-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]] -// CHECK47-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK47-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK47-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] // CHECK47-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK47: omp.inner.for.body: -// CHECK47-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK47-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 // CHECK47-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK47-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK47-NEXT: [[TMP12:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK47-NEXT: store i32 [[ADD]], ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK47-NEXT: [[TMP12:%.*]] = load i32, ptr [[I4]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP12]] -// CHECK47-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK47-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK47-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK47: omp.body.continue: // CHECK47-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK47: 
omp.inner.for.inc: -// CHECK47-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK47-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK47-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] -// CHECK47-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK47-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK47-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK47: omp.inner.for.end: // CHECK47-NEXT: br label [[OMP_IF_END:%.*]] // CHECK47: omp_if.else: // CHECK47-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] // CHECK47: omp.inner.for.cond8: -// CHECK47-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 -// CHECK47-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK47-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// CHECK47-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK47-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END17:%.*]] // CHECK47: omp.inner.for.body10: -// CHECK47-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK47-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP16]], 1 // CHECK47-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]] // CHECK47-NEXT: store i32 [[ADD12]], ptr [[I4]], align 4 -// CHECK47-NEXT: [[TMP17:%.*]] = load i32, ptr [[I4]], align 4 +// CHECK47-NEXT: [[TMP17:%.*]] = load i32, ptr [[I4]], align 4, !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP17]] // CHECK47-NEXT: store i32 0, ptr [[ARRAYIDX13]], align 4 // CHECK47-NEXT: br label [[OMP_BODY_CONTINUE14:%.*]] // CHECK47: omp.body.continue14: // CHECK47-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]] // CHECK47: omp.inner.for.inc15: -// CHECK47-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4 +// CHECK47-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP18]], 1 // CHECK47-NEXT: store i32 [[ADD16]], ptr [[DOTOMP_IV]], align 4 -// CHECK47-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK47-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK47: omp.inner.for.end17: // CHECK47-NEXT: br label [[OMP_IF_END]] // CHECK47: omp_if.end: -// CHECK47-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK47-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4, !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[SUB18:%.*]] = sub nsw i32 [[TMP19]], 0 // CHECK47-NEXT: [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1 // CHECK47-NEXT: [[MUL20:%.*]] = mul nsw i32 [[DIV19]], 1 @@ -5251,7 +5251,7 @@ // CHECK47-NEXT: store i32 [[ADD21]], ptr [[I4]], align 4 // CHECK47-NEXT: br label [[SIMD_IF_END]] // CHECK47: simd.if.end: -// CHECK47-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK47-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP20]]) // CHECK47-NEXT: store i32 [[CALL]], ptr 
[[RETVAL]], align 4 // CHECK47-NEXT: [[TMP21:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 @@ -5277,30 +5277,30 @@ // CHECK47-NEXT: store i32 128, ptr [[TH]], align 4 // CHECK47-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK47-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4 -// CHECK47-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK47-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK47-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK47-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK47: omp.inner.for.cond: -// CHECK47-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9:![0-9]+]] -// CHECK47-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK47-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK47-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK47-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK47: omp.inner.for.body: -// CHECK47-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK47-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK47-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK47-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK47-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK47-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK47-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[A]], i32 0, i32 [[TMP4]] -// CHECK47-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK47-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK47-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK47: omp.body.continue: // CHECK47-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK47: omp.inner.for.inc: -// CHECK47-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] +// CHECK47-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]], !noundef [[NOUNDEF3]] // CHECK47-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1 -// CHECK47-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP9]] -// CHECK47-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK47-NEXT: store i32 [[ADD1]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]] +// CHECK47-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK47: omp.inner.for.end: // CHECK47-NEXT: store i32 10, ptr [[I]], align 4 // CHECK47-NEXT: ret i32 0 diff --git a/clang/test/OpenMP/teams_distribute_simd_collapse_codegen.cpp b/clang/test/OpenMP/teams_distribute_simd_collapse_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_simd_collapse_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_simd_collapse_codegen.cpp @@ -120,34 +120,34 @@ // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds 
[[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store ptr [[A]], ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store ptr [[A]], ptr [[TMP1]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 56088, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, 
ptr [[TMP5]], align 4 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 56088, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK1-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28(ptr [[THIS1]]) #[[ATTR2:[0-9]+]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -155,8 +155,8 @@ // CHECK1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A3]], i64 0, i64 0 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4 -// CHECK1-NEXT: ret i32 [[TMP18]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF4:![0-9]+]] +// CHECK1-NEXT: ret i32 [[TMP16]] // // // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28 @@ -164,7 +164,7 @@ // CHECK1-NEXT: entry: // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., ptr [[TMP0]]) // CHECK1-NEXT: ret void // @@ -187,68 +187,68 @@ // CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8 -// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 56087 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 56087, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP8]], 456 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: store i32 
[[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP10]], 456 // CHECK1-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL4]] // CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK1-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP12]] to i64 // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM7]] -// CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK1-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -288,34 +288,34 @@ // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[THIS1]], ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr [[A]], ptr [[TMP2]], align 4 -// 
CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr [[A]], ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 56088, ptr [[TMP15]], align 8 -// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP7]], 
align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 56088, ptr [[TMP13]], align 8 +// CHECK3-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK3-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28(ptr [[THIS1]]) #[[ATTR2:[0-9]+]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -323,8 +323,8 @@ // CHECK3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A3]], i32 0, i32 0 // CHECK3-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4 -// CHECK3-NEXT: ret i32 [[TMP18]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX4]], align 4, !noundef [[NOUNDEF5:![0-9]+]] +// CHECK3-NEXT: ret i32 [[TMP16]] // // // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28 @@ -332,7 +332,7 @@ // CHECK3-NEXT: entry: // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 4 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: call void (ptr, i32, ptr, ...) 
@__kmpc_fork_teams(ptr @[[GLOB2]], i32 1, ptr @.omp_outlined., ptr [[TMP0]]) // CHECK3-NEXT: ret void // @@ -355,66 +355,66 @@ // CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 56087 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 56087, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP8]], 456 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: store i32 
[[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP10]], 456 // CHECK3-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL4]] // CHECK3-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK3-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[TMP0]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], i32 0, i32 [[TMP11]] -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP12]] -// CHECK3-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 +// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK3-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -455,51 +455,51 @@ // CHECK5-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr 
[[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 456 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP5]], 456 // CHECK5-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456 // CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL4]] // CHECK5-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK5-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr [[THIS1]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP7]] to i64 // CHECK5-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM7]] -// CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK5-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// 
CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 123, ptr [[I]], align 4 // CHECK5-NEXT: store i32 456, ptr [[J]], align 4 // CHECK5-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK5-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A10]], i64 0, i64 0 // CHECK5-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX11]], i64 0, i64 0 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX12]], align 4 +// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX12]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: ret i32 [[TMP9]] // // @@ -526,49 +526,49 @@ // CHECK7-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 56087, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 456 // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP5]], 456 // CHECK7-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 456 // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL4]] // CHECK7-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK7-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], ptr 
[[THIS1]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A]], i32 0, i32 [[TMP6]] -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP7]] -// CHECK7-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 123, ptr [[I]], align 4 // CHECK7-NEXT: store i32 456, ptr [[J]], align 4 // CHECK7-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], ptr [[THIS1]], i32 0, i32 0 // CHECK7-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds [123 x [456 x i32]], ptr [[A9]], i32 0, i32 0 // CHECK7-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [456 x i32], ptr [[ARRAYIDX10]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX11]], align 4 +// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX11]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: ret i32 [[TMP9]] // // @@ -590,18 +590,18 @@ // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x ptr], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8 // CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK9-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK9-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK9-NEXT: store i32 100, ptr [[N]], align 4 // CHECK9-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK9-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to 
i64 // CHECK9-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() // CHECK9-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 @@ -609,100 +609,100 @@ // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 // CHECK9-NEXT: store i64 [[TMP3]], ptr [[__VLA_EXPR1]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[N_CASTED]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[N_CASTED]], align 8 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[M]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i64, ptr [[N_CASTED]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP8]], ptr [[M_CASTED]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[M_CASTED]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = load i64, ptr [[M_CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP1]], [[TMP3]] // CHECK9-NEXT: [[TMP11:%.*]] = mul nuw i64 [[TMP10]], 4 // CHECK9-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[DOTOFFLOAD_SIZES]], ptr align 8 @.offload_sizes, i64 40, i1 false) -// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: store i64 [[TMP7]], ptr [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: store i64 [[TMP7]], ptr [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store i64 [[TMP7]], ptr [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store ptr null, ptr [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP16]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 // CHECK9-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP18]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: store i64 [[TMP9]], ptr [[TMP20]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store ptr null, ptr [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP25]], align 8 -// CHECK9-NEXT: 
[[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK9-NEXT: store i64 [[TMP1]], ptr [[TMP19]], align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store ptr null, ptr [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP22]], align 8 +// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK9-NEXT: store ptr null, ptr [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP24]], align 8 +// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP25]], align 8 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 +// CHECK9-NEXT: store i64 [[TMP11]], ptr [[TMP26]], align 8 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 // CHECK9-NEXT: store ptr null, ptr [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK9-NEXT: store i64 [[TMP3]], ptr [[TMP30]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK9-NEXT: store ptr null, ptr [[TMP32]], align 8 -// CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr [[VLA]], ptr [[TMP35]], align 8 -// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK9-NEXT: store i64 [[TMP11]], ptr [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK9-NEXT: store ptr null, ptr [[TMP38]], align 8 -// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP42:%.*]] = load i32, ptr [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP42]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP43:%.*]] = load i32, ptr [[M]], align 4 -// CHECK9-NEXT: store i32 [[TMP43]], ptr [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[TMP44:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP44]], 0 +// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP31:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP32:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: store i32 [[TMP32]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP33]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 -// CHECK9-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP45]], 0 -// CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 -// CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 -// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] -// CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 -// CHECK9-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[TMP46:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP46]], 1 +// CHECK9-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 +// CHECK9-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP34]], 0 +// CHECK9-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 +// CHECK9-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 +// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] +// CHECK9-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 +// CHECK9-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK9-NEXT: [[TMP35:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP35]], 1 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 1, ptr [[TMP47]], align 4 -// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 5, ptr [[TMP48]], align 4 -// CHECK9-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP39]], ptr [[TMP49]], align 8 -// CHECK9-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP40]], ptr [[TMP50]], align 8 -// CHECK9-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr [[TMP41]], ptr [[TMP51]], align 8 -// CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP52]], align 8 -// CHECK9-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 
-// CHECK9-NEXT: store ptr null, ptr [[TMP53]], align 8 -// CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP54]], align 8 -// CHECK9-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 [[ADD]], ptr [[TMP55]], align 8 -// CHECK9-NEXT: [[TMP56:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP57:%.*]] = icmp ne i32 [[TMP56]], 0 -// CHECK9-NEXT: br i1 [[TMP57]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 1, ptr [[TMP36]], align 4 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 5, ptr [[TMP37]], align 4 +// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr [[TMP28]], ptr [[TMP38]], align 8 +// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: store ptr [[TMP29]], ptr [[TMP39]], align 8 +// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr [[TMP30]], ptr [[TMP40]], align 8 +// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: store ptr @.offload_maptypes, ptr [[TMP41]], align 8 +// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP42]], align 8 +// CHECK9-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK9-NEXT: store ptr null, ptr [[TMP43]], align 8 +// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 [[ADD]], ptr [[TMP44]], align 8 +// CHECK9-NEXT: [[TMP45:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP46:%.*]] = icmp ne i32 [[TMP45]], 0 +// CHECK9-NEXT: br i1 [[TMP46]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], ptr [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP58:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 -// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP58]]) +// CHECK9-NEXT: [[TMP47:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP47]]) // CHECK9-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP59:%.*]] = load ptr, ptr 
[[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP59]]) -// CHECK9-NEXT: [[TMP60:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP60]] +// CHECK9-NEXT: [[TMP48:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(ptr [[TMP48]]) +// CHECK9-NEXT: [[TMP49:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP49]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83 @@ -718,8 +718,8 @@ // CHECK9-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR]], align 8 // CHECK9-NEXT: store i64 [[VLA1]], ptr [[VLA_ADDR2]], align 8 // CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 8 // CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined., ptr [[N_ADDR]], ptr [[M_ADDR]], i64 [[TMP0]], i64 [[TMP1]], ptr [[TMP2]]) // CHECK9-NEXT: ret void @@ -758,18 +758,18 @@ // CHECK9-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 8 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[M_ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR]], align 8 -// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8 +// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[VLA_ADDR]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP3:%.*]] = load i64, ptr [[VLA_ADDR2]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[A_ADDR]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP8]], 0 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 @@ -778,46 +778,46 @@ // CHECK9-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_5]], align 8 // CHECK9-NEXT: store i32 0, ptr [[I]], align 4 // CHECK9-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]] // CHECK9-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]] // CHECK9: land.lhs.true: 
-// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP10:%.*]] = icmp slt i32 0, [[TMP10]] // CHECK9-NEXT: br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] // CHECK9: omp.precond.then: // CHECK9-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i64 [[TMP11]], ptr [[DOTOMP_UB]], align 8 // CHECK9-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1:[0-9]+]], i32 [[TMP13]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK9-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK9-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP13:%.*]] = icmp sgt i64 [[TMP14]], [[TMP15]] // CHECK9-NEXT: br i1 [[CMP13]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: -// CHECK9-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i64 [ [[TMP16]], [[COND_TRUE]] ], [ [[TMP17]], [[COND_FALSE]] ] // CHECK9-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i64 [[TMP18]], ptr [[DOTOMP_IV]], align 8 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !5 +// CHECK9-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP14:%.*]] = icmp sle i64 [[TMP19]], [[TMP20]] // CHECK9-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !5 +// CHECK9-NEXT: [[TMP21:%.*]] = load i64, ptr 
[[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP22]], 0 // CHECK9-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1 // CHECK9-NEXT: [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]] @@ -826,16 +826,16 @@ // CHECK9-NEXT: [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL20]] // CHECK9-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK9-NEXT: store i32 [[CONV21]], ptr [[I11]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !5 +// CHECK9-NEXT: store i32 [[CONV21]], ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB22:%.*]] = sub nsw i32 [[TMP25]], 0 // CHECK9-NEXT: [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1 // CHECK9-NEXT: [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]] // CHECK9-NEXT: [[CONV25:%.*]] = sext i32 [[MUL24]] to i64 // CHECK9-NEXT: [[DIV26:%.*]] = sdiv i64 [[TMP24]], [[CONV25]] -// CHECK9-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !5 +// CHECK9-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP26]], 0 // CHECK9-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 // CHECK9-NEXT: [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]] @@ -845,40 +845,40 @@ // CHECK9-NEXT: [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1 // CHECK9-NEXT: [[ADD34:%.*]] = add nsw i64 0, [[MUL33]] // CHECK9-NEXT: [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32 -// CHECK9-NEXT: store i32 [[CONV35]], ptr [[J12]], align 4, !llvm.access.group !5 -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[I11]], align 4, !llvm.access.group !5 +// CHECK9-NEXT: store i32 [[CONV35]], ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: [[TMP27:%.*]] = load i32, ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP27]] to i64 // CHECK9-NEXT: [[TMP28:%.*]] = mul nsw i64 [[IDXPROM]], [[TMP3]] // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i64 [[TMP28]] -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[J12]], align 4, !llvm.access.group !5 +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM36:%.*]] = sext i32 [[TMP29]] to i64 // CHECK9-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i64 [[IDXPROM36]] -// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX37]], align 4, !llvm.access.group !5 +// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX37]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br 
label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 +// CHECK9-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[ADD38:%.*]] = add nsw i64 [[TMP30]], 1 -// CHECK9-NEXT: store i64 [[ADD38]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !5 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK9-NEXT: store i64 [[ADD38]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP6]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: [[TMP31:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4 +// CHECK9-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP32]]) -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK9-NEXT: br i1 [[TMP34]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: -// CHECK9-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP35]], 0 // CHECK9-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 // CHECK9-NEXT: [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1 // CHECK9-NEXT: [[ADD42:%.*]] = add nsw i32 0, [[MUL41]] // CHECK9-NEXT: store i32 [[ADD42]], ptr [[I11]], align 4 -// CHECK9-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK9-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP36]], 0 // CHECK9-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1 // CHECK9-NEXT: [[MUL45:%.*]] = mul nsw i32 [[DIV44]], 1 @@ -904,34 +904,34 @@ // CHECK9-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: store ptr [[A]], ptr [[TMP0]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[A]], ptr [[TMP2]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store ptr null, ptr [[TMP4]], align 8 -// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: store ptr [[A]], ptr [[TMP1]], align 8 +// CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store ptr null, ptr [[TMP2]], align 8 +// CHECK9-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr 
[[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK9-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK9-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK9-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK9-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK9-NEXT: store ptr @.offload_sizes.2, ptr [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK9-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP12]], align 8 -// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK9-NEXT: store ptr null, ptr [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK9-NEXT: store ptr null, ptr [[TMP14]], align 8 -// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK9-NEXT: store i64 20, ptr [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, ptr [[KERNEL_ARGS]]) -// CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK9-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK9-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK9-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK9-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK9-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8 +// CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK9-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8 +// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK9-NEXT: store ptr @.offload_sizes.2, ptr [[TMP9]], align 8 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK9-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK9-NEXT: store ptr null, ptr [[TMP11]], align 8 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 
7 +// CHECK9-NEXT: store ptr null, ptr [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK9-NEXT: store i64 20, ptr [[TMP13]], align 8 +// CHECK9-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, ptr [[KERNEL_ARGS]]) +// CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK9-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69(ptr [[A]]) #[[ATTR3]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -973,61 +973,61 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 19 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 19, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !11 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP8]], 2 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !11 -// CHECK9-NEXT: [[TMP9:%.*]] = load 
i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP10]], 2 // CHECK9-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 2 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL4]] // CHECK9-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK9-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !11 -// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 +// CHECK9-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[TMP0]], i64 0, i64 [[IDXPROM]] -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !11 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP12]] to i64 // CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM7]] -// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group !11 +// CHECK9-NEXT: store i32 0, ptr [[ARRAYIDX8]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK9-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -1072,109 +1072,109 @@ // CHECK11-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK11-NEXT: store i32 100, ptr [[N]], align 4 // CHECK11-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr 
[[N]], align 4, !noundef [[NOUNDEF6:![0-9]+]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK11-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 // CHECK11-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 // CHECK11-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR1]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP4]], ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_CASTED]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[M]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_CASTED]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[M_CASTED]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[M_CASTED]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[M_CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK11-NEXT: [[TMP9:%.*]] = mul nuw i32 [[TMP8]], 4 // CHECK11-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64 // CHECK11-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[DOTOFFLOAD_SIZES]], ptr align 4 @.offload_sizes, i32 40, i1 false) -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 [[TMP5]], ptr [[TMP11]], align 4 +// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: store i32 [[TMP5]], ptr [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 [[TMP5]], ptr [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store ptr null, ptr [[TMP13]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 // CHECK11-NEXT: store ptr null, ptr [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 [[TMP7]], ptr [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store ptr null, ptr [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP24]], align 4 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK11-NEXT: store i32 [[TMP0]], ptr [[TMP18]], align 4 +// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr null, ptr [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP21]], align 4 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr null, ptr [[TMP22]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP23]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP24]], align 4 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 +// CHECK11-NEXT: store i64 [[TMP10]], ptr [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 // CHECK11-NEXT: store ptr null, ptr [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK11-NEXT: store i32 [[TMP1]], ptr [[TMP29]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr null, ptr [[TMP31]], align 4 -// CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP32]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr [[VLA]], ptr [[TMP34]], align 4 -// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK11-NEXT: store i64 [[TMP10]], ptr [[TMP36]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr null, ptr [[TMP37]], align 4 -// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// 
CHECK11-NEXT: [[TMP41:%.*]] = load i32, ptr [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP41]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP42:%.*]] = load i32, ptr [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP42]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[TMP43:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP43]], 0 +// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i64], ptr [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: store i32 [[TMP30]], ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: store i32 [[TMP31]], ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP32]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP44:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP44]], 0 +// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP33]], 0 // CHECK11-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK11-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK11-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[TMP45:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP45]], 1 +// CHECK11-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP34]], 1 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 1, ptr [[TMP46]], align 4 -// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 5, ptr [[TMP47]], align 4 -// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK11-NEXT: store ptr [[TMP38]], ptr [[TMP48]], align 4 -// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr [[TMP39]], ptr [[TMP49]], align 4 -// CHECK11-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr [[TMP40]], ptr [[TMP50]], align 4 -// CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP51]], align 4 -// CHECK11-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// 
CHECK11-NEXT: store ptr null, ptr [[TMP52]], align 4 -// CHECK11-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP53]], align 4 -// CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 [[ADD]], ptr [[TMP54]], align 8 -// CHECK11-NEXT: [[TMP55:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP56:%.*]] = icmp ne i32 [[TMP55]], 0 -// CHECK11-NEXT: br i1 [[TMP56]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 1, ptr [[TMP35]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 5, ptr [[TMP36]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr [[TMP27]], ptr [[TMP37]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr [[TMP28]], ptr [[TMP38]], align 4 +// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr [[TMP29]], ptr [[TMP39]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: store ptr @.offload_maptypes, ptr [[TMP40]], align 4 +// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP41]], align 4 +// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP42]], align 4 +// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 [[ADD]], ptr [[TMP43]], align 8 +// CHECK11-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK11-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], ptr [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP57:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 -// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP57]]) +// CHECK11-NEXT: [[TMP46:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP46]]) // CHECK11-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP58:%.*]] = load ptr, ptr 
[[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP58]]) -// CHECK11-NEXT: [[TMP59:%.*]] = load i32, ptr [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP59]] +// CHECK11-NEXT: [[TMP47:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(ptr [[TMP47]]) +// CHECK11-NEXT: [[TMP48:%.*]] = load i32, ptr [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP48]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83 @@ -1190,8 +1190,8 @@ // CHECK11-NEXT: store i32 [[VLA]], ptr [[VLA_ADDR]], align 4 // CHECK11-NEXT: store i32 [[VLA1]], ptr [[VLA_ADDR2]], align 4 // CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP0:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP1:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR]], align 4 // CHECK11-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB2]], i32 5, ptr @.omp_outlined., ptr [[N_ADDR]], ptr [[M_ADDR]], i32 [[TMP0]], i32 [[TMP1]], ptr [[TMP2]]) // CHECK11-NEXT: ret void @@ -1230,18 +1230,18 @@ // CHECK11-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[M_ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR]], align 4 -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[VLA_ADDR]], align 4, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[VLA_ADDR2]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP4:%.*]] = load ptr, ptr [[A_ADDR]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_4]], align 4 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP7]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP8]], 0 // CHECK11-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK11-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 @@ -1250,46 +1250,46 @@ // CHECK11-NEXT: store i64 [[SUB9]], ptr [[DOTCAPTURE_EXPR_5]], align 8 // CHECK11-NEXT: store i32 0, ptr [[I]], align 4 // CHECK11-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]] // CHECK11-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], 
label [[OMP_PRECOND_END:%.*]] // CHECK11: land.lhs.true: -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP10:%.*]] = icmp slt i32 0, [[TMP10]] // CHECK11-NEXT: br i1 [[CMP10]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]] // CHECK11: omp.precond.then: // CHECK11-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK11-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i64 [[TMP11]], ptr [[DOTOMP_UB]], align 8 // CHECK11-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP12:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB1:[0-9]+]], i32 [[TMP13]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1) -// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP13:%.*]] = icmp sgt i64 [[TMP14]], [[TMP15]] // CHECK11-NEXT: br i1 [[CMP13]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: -// CHECK11-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8 +// CHECK11-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_5]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8 +// CHECK11-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i64 [ [[TMP16]], [[COND_TRUE]] ], [ [[TMP17]], [[COND_FALSE]] ] // CHECK11-NEXT: store i64 [[COND]], ptr [[DOTOMP_UB]], align 8 -// CHECK11-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK11-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i64 [[TMP18]], ptr [[DOTOMP_IV]], align 8 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP20:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP14:%.*]] = icmp sle i64 [[TMP19]], [[TMP20]] // CHECK11-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP22:%.*]] = load i32, ptr 
[[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP21:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP22]], 0 // CHECK11-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1 // CHECK11-NEXT: [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]] @@ -1298,16 +1298,16 @@ // CHECK11-NEXT: [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL20]] // CHECK11-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK11-NEXT: store i32 [[CONV21]], ptr [[I11]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !6 +// CHECK11-NEXT: store i32 [[CONV21]], ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP24:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB22:%.*]] = sub nsw i32 [[TMP25]], 0 // CHECK11-NEXT: [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1 // CHECK11-NEXT: [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]] // CHECK11-NEXT: [[CONV25:%.*]] = sext i32 [[MUL24]] to i64 // CHECK11-NEXT: [[DIV26:%.*]] = sdiv i64 [[TMP24]], [[CONV25]] -// CHECK11-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP26]], 0 // CHECK11-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 // CHECK11-NEXT: [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]] @@ -1317,38 +1317,38 @@ // CHECK11-NEXT: [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1 // CHECK11-NEXT: [[ADD34:%.*]] = add nsw i64 0, [[MUL33]] // CHECK11-NEXT: [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32 -// CHECK11-NEXT: store i32 [[CONV35]], ptr [[J12]], align 4, !llvm.access.group !6 -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[I11]], align 4, !llvm.access.group !6 +// CHECK11-NEXT: store i32 [[CONV35]], ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: [[TMP27:%.*]] = load i32, ptr [[I11]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP28:%.*]] = mul nsw i32 [[TMP27]], [[TMP3]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 [[TMP28]] -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[J12]], align 4, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, ptr [[J12]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i32 [[TMP29]] -// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX36]], align 4, !llvm.access.group !6 +// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX36]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: 
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 +// CHECK11-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ADD37:%.*]] = add nsw i64 [[TMP30]], 1 -// CHECK11-NEXT: store i64 [[ADD37]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !6 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK11-NEXT: store i64 [[ADD37]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP7]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: [[TMP31:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4 +// CHECK11-NEXT: [[TMP32:%.*]] = load i32, ptr [[TMP31]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP32]]) -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK11-NEXT: br i1 [[TMP34]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: -// CHECK11-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB38:%.*]] = sub nsw i32 [[TMP35]], 0 // CHECK11-NEXT: [[DIV39:%.*]] = sdiv i32 [[SUB38]], 1 // CHECK11-NEXT: [[MUL40:%.*]] = mul nsw i32 [[DIV39]], 1 // CHECK11-NEXT: [[ADD41:%.*]] = add nsw i32 0, [[MUL40]] // CHECK11-NEXT: store i32 [[ADD41]], ptr [[I11]], align 4 -// CHECK11-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[SUB42:%.*]] = sub nsw i32 [[TMP36]], 0 // CHECK11-NEXT: [[DIV43:%.*]] = sdiv i32 [[SUB42]], 1 // CHECK11-NEXT: [[MUL44:%.*]] = mul nsw i32 [[DIV43]], 1 @@ -1374,34 +1374,34 @@ // CHECK11-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: store ptr [[A]], ptr [[TMP0]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: store ptr [[A]], ptr [[TMP2]], align 4 -// CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store ptr null, ptr [[TMP4]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: store ptr [[A]], ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store ptr null, ptr [[TMP2]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// 
CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK11-NEXT: store i32 1, ptr [[TMP7]], align 4 -// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK11-NEXT: store i32 1, ptr [[TMP8]], align 4 -// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK11-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK11-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK11-NEXT: store ptr @.offload_sizes.2, ptr [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK11-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK11-NEXT: store ptr null, ptr [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK11-NEXT: store ptr null, ptr [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK11-NEXT: store i64 20, ptr [[TMP15]], align 8 -// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, ptr [[KERNEL_ARGS]]) -// CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 -// CHECK11-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK11-NEXT: store i32 1, ptr [[TMP5]], align 4 +// CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK11-NEXT: store i32 1, ptr [[TMP6]], align 4 +// CHECK11-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK11-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4 +// CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK11-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4 +// CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK11-NEXT: store ptr @.offload_sizes.2, ptr [[TMP9]], align 4 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK11-NEXT: store ptr @.offload_maptypes.3, ptr [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK11-NEXT: store ptr null, ptr [[TMP11]], align 4 +// CHECK11-NEXT: 
[[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK11-NEXT: store ptr null, ptr [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK11-NEXT: store i64 20, ptr [[TMP13]], align 8 +// CHECK11-NEXT: [[TMP14:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, ptr [[KERNEL_ARGS]]) +// CHECK11-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 +// CHECK11-NEXT: br i1 [[TMP15]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69(ptr [[A]]) #[[ATTR3]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1443,59 +1443,59 @@ // CHECK11-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK11-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK11-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK11-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 19 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK11: cond.true: // CHECK11-NEXT: br label [[COND_END:%.*]] // CHECK11: cond.false: -// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK11-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: br label [[COND_END]] // CHECK11: cond.end: // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 19, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK11-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK11-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK11: omp.inner.for.cond: -// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !12 +// CHECK11-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK11: omp.inner.for.body: -// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK11-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP8]], 2 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK11-NEXT: 
[[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !12 -// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK11-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] +// CHECK11-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[DIV3:%.*]] = sdiv i32 [[TMP10]], 2 // CHECK11-NEXT: [[MUL4:%.*]] = mul nsw i32 [[DIV3]], 2 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL4]] // CHECK11-NEXT: [[MUL5:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 0, [[MUL5]] -// CHECK11-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group !12 -// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !12 +// CHECK11-NEXT: store i32 [[ADD6]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[TMP0]], i32 0, i32 [[TMP11]] -// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !12 +// CHECK11-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP12]] -// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !12 +// CHECK11-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK11: omp.body.continue: // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: -// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK11-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1 -// CHECK11-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK11-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK11: omp.inner.for.end: // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK11: omp.loop.exit: // CHECK11-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK11-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK11-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK11-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK11: .omp.final.then: @@ -1541,9 +1541,9 @@ // CHECK13-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 8 // CHECK13-NEXT: store i32 100, ptr [[N]], align 4 // CHECK13-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr 
[[N]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4 +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 // CHECK13-NEXT: [[TMP4:%.*]] = call ptr @llvm.stacksave() // CHECK13-NEXT: store ptr [[TMP4]], ptr [[SAVED_STACK]], align 8 @@ -1551,15 +1551,15 @@ // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP5]], align 4 // CHECK13-NEXT: store i64 [[TMP1]], ptr [[__VLA_EXPR0]], align 8 // CHECK13-NEXT: store i64 [[TMP3]], ptr [[__VLA_EXPR1]], align 8 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4 +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP6]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[M]], align 4 +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP8]], 0 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK13-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK13-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP9]], 0 // CHECK13-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK13-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 @@ -1567,29 +1567,29 @@ // CHECK13-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK13-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 // CHECK13-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK13-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK13-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i64 [[TMP10]], ptr [[DOTOMP_UB]], align 8 // CHECK13-NEXT: store i32 0, ptr [[I]], align 4 // CHECK13-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP11]] // CHECK13-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[SIMD_IF_END:%.*]] // CHECK13: land.lhs.true: -// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK13-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP8:%.*]] = icmp slt i32 0, [[TMP12]] // CHECK13-NEXT: br i1 [[CMP8]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END]] // CHECK13: simd.if.then: -// CHECK13-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK13-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i64 [[TMP13]], ptr [[DOTOMP_IV]], align 8 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !2 +// CHECK13-NEXT: 
[[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP11:%.*]] = icmp sle i64 [[TMP14]], [[TMP15]] // CHECK13-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP17]], 0 // CHECK13-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 // CHECK13-NEXT: [[MUL14:%.*]] = mul nsw i32 1, [[DIV13]] @@ -1598,16 +1598,16 @@ // CHECK13-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL17]] // CHECK13-NEXT: [[CONV18:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK13-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP20]], 0 // CHECK13-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1 // CHECK13-NEXT: [[MUL21:%.*]] = mul nsw i32 1, [[DIV20]] // CHECK13-NEXT: [[CONV22:%.*]] = sext i32 [[MUL21]] to i64 // CHECK13-NEXT: [[DIV23:%.*]] = sdiv i64 [[TMP19]], [[CONV22]] -// CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP21]], 0 // CHECK13-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 // CHECK13-NEXT: [[MUL26:%.*]] = mul nsw i32 1, [[DIV25]] @@ -1617,31 +1617,31 @@ // CHECK13-NEXT: [[MUL30:%.*]] = mul nsw i64 [[SUB29]], 1 // CHECK13-NEXT: [[ADD31:%.*]] = add nsw i64 0, [[MUL30]] // CHECK13-NEXT: [[CONV32:%.*]] = trunc i64 [[ADD31]] to i32 -// CHECK13-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group !2 -// CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: [[TMP22:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP22]] to i64 // CHECK13-NEXT: [[TMP23:%.*]] = mul nsw i64 [[IDXPROM]], [[TMP3]] // 
CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i64 [[TMP23]] -// CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP24:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM33:%.*]] = sext i32 [[TMP24]] to i64 // CHECK13-NEXT: [[ARRAYIDX34:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i64 [[IDXPROM33]] -// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX34]], align 4, !llvm.access.group !2 +// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX34]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 +// CHECK13-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD35:%.*]] = add nsw i64 [[TMP25]], 1 -// CHECK13-NEXT: store i64 [[ADD35]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !2 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK13-NEXT: store i64 [[ADD35]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP3]] +// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK13: omp.inner.for.end: -// CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB36:%.*]] = sub nsw i32 [[TMP26]], 0 // CHECK13-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1 // CHECK13-NEXT: [[MUL38:%.*]] = mul nsw i32 [[DIV37]], 1 // CHECK13-NEXT: [[ADD39:%.*]] = add nsw i32 0, [[MUL38]] // CHECK13-NEXT: store i32 [[ADD39]], ptr [[I9]], align 4 -// CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK13-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP27]], 0 // CHECK13-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 // CHECK13-NEXT: [[MUL42:%.*]] = mul nsw i32 [[DIV41]], 1 @@ -1649,7 +1649,7 @@ // CHECK13-NEXT: store i32 [[ADD43]], ptr [[J10]], align 4 // CHECK13-NEXT: br label [[SIMD_IF_END]] // CHECK13: simd.if.end: -// CHECK13-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK13-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP28]]) // CHECK13-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK13-NEXT: [[TMP29:%.*]] = load ptr, ptr [[SAVED_STACK]], align 8 @@ -1673,43 +1673,43 @@ // CHECK13-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK13-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 19, ptr [[DOTOMP_UB]], align 4 -// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK13-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] // CHECK13-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: -// CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !6 +// 
CHECK13-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: -// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 2 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK13-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP5]], 2 // CHECK13-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 2 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL3]] // CHECK13-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK13-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL4]] -// CHECK13-NEXT: store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[A]], i64 0, i64 [[IDXPROM]] -// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP7]] to i64 // CHECK13-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM6]] -// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: store i32 0, ptr [[ARRAYIDX7]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: -// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK13-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] // CHECK13-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK13-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK13-NEXT: br label 
[[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i32 10, ptr [[I]], align 4 // CHECK13-NEXT: store i32 2, ptr [[J]], align 4 @@ -1744,23 +1744,23 @@ // CHECK15-NEXT: store ptr [[ARGV]], ptr [[ARGV_ADDR]], align 4 // CHECK15-NEXT: store i32 100, ptr [[N]], align 4 // CHECK15-NEXT: store i32 2, ptr [[M]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4 -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF3:![0-9]+]] +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP2:%.*]] = call ptr @llvm.stacksave() // CHECK15-NEXT: store ptr [[TMP2]], ptr [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 [[TMP0]], [[TMP1]] // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP3]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], ptr [[__VLA_EXPR0]], align 4 // CHECK15-NEXT: store i32 [[TMP1]], ptr [[__VLA_EXPR1]], align 4 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4 +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[N]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP4]], ptr [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[M]], align 4 +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[M]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP6]], 0 // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK15-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP7]], 0 // CHECK15-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK15-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 @@ -1768,29 +1768,29 @@ // CHECK15-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK15-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8 // CHECK15-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8 -// CHECK15-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK15-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_UB]], align 8 // CHECK15-NEXT: store i32 0, ptr [[I]], align 4 // CHECK15-NEXT: store i32 0, ptr [[J]], align 4 -// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]] // CHECK15-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[SIMD_IF_END:%.*]] // CHECK15: land.lhs.true: -// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK15-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP8:%.*]] = icmp slt i32 0, [[TMP10]] // CHECK15-NEXT: br i1 [[CMP8]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END]] // CHECK15: simd.if.then: -// CHECK15-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8 +// CHECK15-NEXT: [[TMP11:%.*]] = load i64, ptr 
[[DOTOMP_LB]], align 8, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i64 [[TMP11]], ptr [[DOTOMP_IV]], align 8 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP11:%.*]] = icmp sle i64 [[TMP12]], [[TMP13]] // CHECK15-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP15]], 0 // CHECK15-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 // CHECK15-NEXT: [[MUL14:%.*]] = mul nsw i32 1, [[DIV13]] @@ -1799,16 +1799,16 @@ // CHECK15-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL17]] // CHECK15-NEXT: [[CONV18:%.*]] = trunc i64 [[ADD]] to i32 -// CHECK15-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP18]], 0 // CHECK15-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1 // CHECK15-NEXT: [[MUL21:%.*]] = mul nsw i32 1, [[DIV20]] // CHECK15-NEXT: [[CONV22:%.*]] = sext i32 [[MUL21]] to i64 // CHECK15-NEXT: [[DIV23:%.*]] = sdiv i64 [[TMP17]], [[CONV22]] -// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP19]], 0 // CHECK15-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 // CHECK15-NEXT: [[MUL26:%.*]] = mul nsw i32 1, [[DIV25]] @@ -1818,29 +1818,29 @@ // CHECK15-NEXT: [[MUL30:%.*]] = mul nsw i64 [[SUB29]], 1 // CHECK15-NEXT: [[ADD31:%.*]] = add nsw i64 0, [[MUL30]] // CHECK15-NEXT: [[CONV32:%.*]] = trunc i64 [[ADD31]] to i32 -// CHECK15-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group !3 -// CHECK15-NEXT: [[TMP20:%.*]] = load 
i32, ptr [[I9]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: [[TMP20:%.*]] = load i32, ptr [[I9]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[TMP21:%.*]] = mul nsw i32 [[TMP20]], [[TMP1]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[VLA]], i32 [[TMP21]] -// CHECK15-NEXT: [[TMP22:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP22:%.*]] = load i32, ptr [[J10]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX33:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i32 [[TMP22]] -// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX33]], align 4, !llvm.access.group !3 +// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX33]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 +// CHECK15-NEXT: [[TMP23:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD34:%.*]] = add nsw i64 [[TMP23]], 1 -// CHECK15-NEXT: store i64 [[ADD34]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group !3 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK15-NEXT: store i64 [[ADD34]], ptr [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP4]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK15: omp.inner.for.end: -// CHECK15-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB35:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK15-NEXT: [[DIV36:%.*]] = sdiv i32 [[SUB35]], 1 // CHECK15-NEXT: [[MUL37:%.*]] = mul nsw i32 [[DIV36]], 1 // CHECK15-NEXT: [[ADD38:%.*]] = add nsw i32 0, [[MUL37]] // CHECK15-NEXT: store i32 [[ADD38]], ptr [[I9]], align 4 -// CHECK15-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK15-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP25]], 0 // CHECK15-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 // CHECK15-NEXT: [[MUL41:%.*]] = mul nsw i32 [[DIV40]], 1 @@ -1848,7 +1848,7 @@ // CHECK15-NEXT: store i32 [[ADD42]], ptr [[J10]], align 4 // CHECK15-NEXT: br label [[SIMD_IF_END]] // CHECK15: simd.if.end: -// CHECK15-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4 +// CHECK15-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARGC_ADDR]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP26]]) // CHECK15-NEXT: store i32 [[CALL]], ptr [[RETVAL]], align 4 // CHECK15-NEXT: [[TMP27:%.*]] = load ptr, ptr [[SAVED_STACK]], align 4 @@ -1872,41 +1872,41 @@ // CHECK15-NEXT: store i32 [[ARGC]], ptr [[ARGC_ADDR]], align 4 // CHECK15-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 19, ptr [[DOTOMP_UB]], align 4 -// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK15-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // CHECK15-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label 
[[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: -// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: -// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP3]], 2 // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK15-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP5]], 2 // CHECK15-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 2 // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], [[MUL3]] // CHECK15-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1 // CHECK15-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL4]] -// CHECK15-NEXT: store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: store i32 [[ADD5]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: [[TMP6:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [2 x i32]], ptr [[A]], i32 0, i32 [[TMP6]] -// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP7:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP7]] -// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX6]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: store i32 0, ptr [[ARRAYIDX6]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: -// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 +// CHECK15-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] // CHECK15-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP8]], 1 -// CHECK15-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// 
CHECK15-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 10, ptr [[I]], align 4 // CHECK15-NEXT: store i32 2, ptr [[J]], align 4 diff --git a/clang/test/OpenMP/teams_distribute_simd_reduction_codegen.cpp b/clang/test/OpenMP/teams_distribute_simd_reduction_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_simd_reduction_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_simd_reduction_codegen.cpp @@ -92,39 +92,39 @@ // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF5:![0-9]+]] // CHECK1-NEXT: store i32 [[TMP0]], ptr [[SIVAR_CASTED]], align 4 -// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[SIVAR_CASTED]], align 8 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[SIVAR_CASTED]], align 8, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP4]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 
-// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 2, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l63.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK1-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 2, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l63.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l63(i64 [[TMP1]]) #[[ATTR2:[0-9]+]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -167,49 +167,49 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void 
@__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP6]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !5 -// CHECK1-NEXT: 
br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -218,21 +218,21 @@ // CHECK1: .omp.final.done: // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK1-NEXT: store ptr [[SIVAR1]], ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] // CHECK1-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP19:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: @@ -247,15 +247,15 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 
0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK1-NEXT: ret void // // @@ -271,41 +271,41 @@ // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store i32 0, ptr [[T_VAR]], align 4 // CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i64 8, i1 false) -// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK1-NEXT: store i32 [[TMP1]], ptr [[T_VAR_CASTED]], align 4 -// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[T_VAR_CASTED]], align 8 -// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP2]], ptr [[TMP3]], align 8 -// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: store i64 [[TMP2]], ptr [[TMP5]], align 8 -// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store ptr null, ptr [[TMP7]], align 8 -// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: store i32 [[TMP0]], ptr [[T_VAR_CASTED]], align 4 +// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[T_VAR_CASTED]], align 8, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP1]], ptr [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store ptr null, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK1-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK1-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK1-NEXT: [[TMP12:%.*]] = 
getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK1-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 8 -// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK1-NEXT: store ptr [[TMP9]], ptr [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK1-NEXT: store ptr @.offload_sizes.3, ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK1-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK1-NEXT: store ptr null, ptr [[TMP16]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK1-NEXT: store ptr null, ptr [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK1-NEXT: store i64 2, ptr [[TMP18]], align 8 -// CHECK1-NEXT: [[TMP19:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) -// CHECK1-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0 -// CHECK1-NEXT: br i1 [[TMP20]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK1-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK1-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK1-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK1-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK1-NEXT: store ptr @.offload_sizes.3, ptr [[TMP11]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK1-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK1-NEXT: store ptr null, ptr [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK1-NEXT: store ptr null, ptr [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK1-NEXT: store i64 2, ptr [[TMP15]], align 8 +// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) +// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK1-NEXT: br i1 
[[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i64 [[TMP2]]) #[[ATTR2]] +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i64 [[TMP1]]) #[[ATTR2]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK1: omp_offload.cont: // CHECK1-NEXT: ret i32 0 @@ -345,49 +345,49 @@ // CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: -// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: -// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: -// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !11 -// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !11 -// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4, 
!llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] -// CHECK1-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: -// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 +// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]], !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !11 -// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF5]] // CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: @@ -396,21 +396,21 @@ // CHECK1: .omp.final.done: // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK1-NEXT: store ptr [[T_VAR1]], ptr [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) -// CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK1-NEXT: ] // CHECK1: .omp.reduction.case1: -// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] // CHECK1-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.case2: -// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 +// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef 
[[NOUNDEF5]] +// CHECK1-NEXT: [[TMP19:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4 // CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK1: .omp.reduction.default: @@ -425,15 +425,15 @@ // CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF5]] +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK1-NEXT: ret void // // @@ -454,39 +454,39 @@ // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4 -// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF6:![0-9]+]] // CHECK3-NEXT: store i32 [[TMP0]], ptr [[SIVAR_CASTED]], align 4 -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIVAR_CASTED]], align 4 +// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIVAR_CASTED]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP2]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP4]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// 
CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP7]], ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 2, ptr [[TMP17]], align 8 -// CHECK3-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l63.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 -// CHECK3-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 2, ptr [[TMP15]], align 8 +// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l63.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l63(i32 [[TMP1]]) #[[ATTR2:[0-9]+]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -529,49 +529,49 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, 
ptr [[I]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -580,21 +580,21 @@ // CHECK3: .omp.final.done: // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[SIVAR1]], ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK3-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK3-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK3-NEXT: ] // CHECK3: .omp.reduction.case1: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] // CHECK3-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK3-NEXT: call void 
@__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.case2: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP19:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.default: @@ -609,15 +609,15 @@ // CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK3-NEXT: ret void // // @@ -633,41 +633,41 @@ // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 0, ptr [[T_VAR]], align 4 // CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i32 8, i1 false) -// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK3-NEXT: store i32 [[TMP1]], ptr [[T_VAR_CASTED]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[T_VAR_CASTED]], align 4 -// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[TMP3]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 [[TMP2]], ptr [[TMP5]], align 4 -// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store ptr null, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP0:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: store i32 [[TMP0]], ptr [[T_VAR_CASTED]], align 4 +// CHECK3-NEXT: 
[[TMP1:%.*]] = load i32, ptr [[T_VAR_CASTED]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP2]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP1]], ptr [[TMP3]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store ptr null, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 -// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 -// CHECK3-NEXT: store i32 1, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 -// CHECK3-NEXT: store i32 1, ptr [[TMP11]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 -// CHECK3-NEXT: store ptr [[TMP8]], ptr [[TMP12]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 -// CHECK3-NEXT: store ptr [[TMP9]], ptr [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 -// CHECK3-NEXT: store ptr @.offload_sizes.3, ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 -// CHECK3-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 -// CHECK3-NEXT: store ptr null, ptr [[TMP16]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 -// CHECK3-NEXT: store ptr null, ptr [[TMP17]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 -// CHECK3-NEXT: store i64 2, ptr [[TMP18]], align 8 -// CHECK3-NEXT: [[TMP19:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) -// CHECK3-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0 -// CHECK3-NEXT: br i1 [[TMP20]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0 +// CHECK3-NEXT: store i32 1, ptr [[TMP7]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1 +// CHECK3-NEXT: store i32 1, ptr [[TMP8]], align 4 +// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2 +// CHECK3-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4 +// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3 +// 
CHECK3-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4 +// CHECK3-NEXT: store ptr @.offload_sizes.3, ptr [[TMP11]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5 +// CHECK3-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP12]], align 4 +// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6 +// CHECK3-NEXT: store ptr null, ptr [[TMP13]], align 4 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7 +// CHECK3-NEXT: store ptr null, ptr [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8 +// CHECK3-NEXT: store i64 2, ptr [[TMP15]], align 8 +// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 1, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]]) +// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i32 [[TMP2]]) #[[ATTR2]] +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i32 [[TMP1]]) #[[ATTR2]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK3: omp_offload.cont: // CHECK3-NEXT: ret i32 0 @@ -707,49 +707,49 @@ // CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4 -// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: -// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: -// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !12 +// CHECK3-NEXT: [[TMP6:%.*]] = load i32, 
ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: -// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !12 -// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !12 -// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !12 +// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] -// CHECK3-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group !12 +// CHECK3-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: -// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 +// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]], !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 -// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !12 -// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] +// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP13]] +// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF6]] // CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: @@ -758,21 +758,21 @@ // CHECK3: .omp.final.done: // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 // CHECK3-NEXT: store ptr [[T_VAR1]], ptr [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP17:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) -// CHECK3-NEXT: switch i32 [[TMP17]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr 
@.omp.reduction.reduction_func.2, ptr @.gomp_critical_user_.reduction.var) +// CHECK3-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK3-NEXT: ] // CHECK3: .omp.reduction.case1: -// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] +// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] // CHECK3-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.case2: -// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4 +// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP19:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4 // CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK3: .omp.reduction.default: @@ -787,15 +787,15 @@ // CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4 // CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4 -// CHECK3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 -// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4 +// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4 +// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4 -// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4 -// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF6]] +// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK3-NEXT: ret void // // @@ -819,36 +819,36 @@ // CHECK5-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: store i32 0, ptr [[SIVAR]], align 4 // CHECK5-NEXT: 
br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] -// CHECK5-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]], !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !2 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] +// CHECK5-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4 +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4, !noundef [[NOUNDEF2]] // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK5-NEXT: store i32 [[ADD3]], ptr @_ZZ4mainE5sivar, align 4 // CHECK5-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v() @@ -870,37 +870,37 @@ // CHECK5-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i64 8, i1 false) // CHECK5-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// 
CHECK5-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK5-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK5-NEXT: store i32 0, ptr [[T_VAR1]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: -// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK5-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: -// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK5-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]] -// CHECK5-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group !6 +// CHECK5-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK5-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] +// CHECK5-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: -// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1 -// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !6 -// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] +// CHECK5-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]], !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP6]], 1 +// CHECK5-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP7]] +// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK5-NEXT: [[TMP9:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK5-NEXT: [[TMP7:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[TMP8:%.*]] = load i32, ptr 
[[T_VAR1]], align 4, !noundef [[NOUNDEF2]] +// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK5-NEXT: store i32 [[ADD4]], ptr [[T_VAR]], align 4 // CHECK5-NEXT: ret i32 0 // @@ -918,36 +918,36 @@ // CHECK7-NEXT: store i32 0, ptr [[RETVAL]], align 4 // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: store i32 0, ptr [[SIVAR]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] -// CHECK7-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: store i32 [[ADD1]], ptr [[SIVAR]], align 4, !llvm.access.group [[ACC_GRP4]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]], !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1 -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !3 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP4]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZZ4mainE5sivar, align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4 +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr 
@_ZZ4mainE5sivar, align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[SIVAR]], align 4, !noundef [[NOUNDEF3]] // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK7-NEXT: store i32 [[ADD3]], ptr @_ZZ4mainE5sivar, align 4 // CHECK7-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v() @@ -969,37 +969,37 @@ // CHECK7-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i32 8, i1 false) // CHECK7-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4 -// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 -// CHECK7-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_IV]], align 4 +// CHECK7-NEXT: [[TMP0:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: store i32 [[TMP0]], ptr [[DOTOMP_IV]], align 4 // CHECK7-NEXT: store i32 0, ptr [[T_VAR1]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: -// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]] +// CHECK7-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8:![0-9]+]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: -// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1 +// CHECK7-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]] -// CHECK7-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group !7 +// CHECK7-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK7-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP5:%.*]] = load i32, ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP8]], !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], [[TMP4]] +// CHECK7-NEXT: store i32 [[ADD2]], ptr [[T_VAR1]], align 4, !llvm.access.group [[ACC_GRP8]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: -// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1 -// CHECK7-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !7 -// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] +// CHECK7-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]], 
!noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP6]], 1 +// CHECK7-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP8]] +// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP9:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: store i32 2, ptr [[I]], align 4 -// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR]], align 4 -// CHECK7-NEXT: [[TMP9:%.*]] = load i32, ptr [[T_VAR1]], align 4 -// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK7-NEXT: [[TMP7:%.*]] = load i32, ptr [[T_VAR]], align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[TMP8:%.*]] = load i32, ptr [[T_VAR1]], align 4, !noundef [[NOUNDEF3]] +// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP7]], [[TMP8]] // CHECK7-NEXT: store i32 [[ADD4]], ptr [[T_VAR]], align 4 // CHECK7-NEXT: ret i32 0 // @@ -1049,52 +1049,52 @@ // CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4 // CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8 -// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4 +// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4, !noundef [[NOUNDEF4:![0-9]+]] // CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1) -// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK9: cond.true: // CHECK9-NEXT: br label [[COND_END:%.*]] // CHECK9: cond.false: -// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4 +// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: br label [[COND_END]] // CHECK9: cond.end: // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4 -// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4 +// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: -// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5:![0-9]+]], !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: -// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group !4 -// CHECK9-NEXT: 
[[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group !4 -// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] -// CHECK9-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4, !llvm.access.group [[ACC_GRP5]] // CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], ptr [[REF_TMP]], i32 0, i32 0 -// CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP11]], align 8, !llvm.access.group !4 -// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]]), !llvm.access.group !4 +// CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP11]], align 8, !llvm.access.group [[ACC_GRP5]] +// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]]), !llvm.access.group [[ACC_GRP5]] // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK9: omp.body.continue: // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 +// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]], !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 -// CHECK9-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group !4 -// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK9-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP5]] +// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]] // CHECK9: omp.inner.for.end: // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK9: omp.loop.exit: // CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]]) -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4 +// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4, !noundef [[NOUNDEF4]] // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK9: .omp.final.then: @@ -1103,21 +1103,21 @@ // CHECK9: .omp.final.done: // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 // CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) -// CHECK9-NEXT: switch i32 [[TMP18]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var) +// CHECK9-NEXT: switch i32 [[TMP16]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ // CHECK9-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] // CHECK9-NEXT: i32 2, label 
[[DOTOMP_REDUCTION_CASE2:%.*]] // CHECK9-NEXT: ] // CHECK9: .omp.reduction.case1: -// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP0]], align 4 -// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP19]], [[TMP20]] +// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP0]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP17]], [[TMP18]] // CHECK9-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4 // CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK9: .omp.reduction.case2: -// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[SIVAR1]], align 4 -// CHECK9-NEXT: [[TMP22:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP21]] monotonic, align 4 +// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP20:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP19]] monotonic, align 4 // CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var) // CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] // CHECK9: .omp.reduction.default: @@ -1132,15 +1132,15 @@ // CHECK9-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8 // CHECK9-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8 // CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8 -// CHECK9-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 -// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0 +// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8 +// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0 +// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8 +// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 // CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8 -// CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0 -// CHECK9-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4 -// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP7]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] -// CHECK9-NEXT: store i32 [[ADD]], ptr [[TMP10]], align 4 +// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4, !noundef [[NOUNDEF4]] +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]] +// CHECK9-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4 // CHECK9-NEXT: ret void // // diff --git a/clang/test/utils/update_cc_test_checks/Inputs/basic-cplusplus.cpp.expected b/clang/test/utils/update_cc_test_checks/Inputs/basic-cplusplus.cpp.expected --- a/clang/test/utils/update_cc_test_checks/Inputs/basic-cplusplus.cpp.expected +++ b/clang/test/utils/update_cc_test_checks/Inputs/basic-cplusplus.cpp.expected @@ -15,9 +15,9 @@ // CHECK-NEXT: store %class.Foo* [[THIS:%.*]], %class.Foo** [[THIS_ADDR]], align 8 // CHECK-NEXT: store i32 [[ARG:%.*]], i32* [[ARG_ADDR]], align 4 // CHECK-NEXT: [[THIS1:%.*]] = load %class.Foo*, %class.Foo** [[THIS_ADDR]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARG_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARG_ADDR]], align 4, !noundef 
[[NOUNDEF2:![0-9]+]] // CHECK-NEXT: [[X:%.*]] = getelementptr inbounds [[CLASS_FOO:%.*]], %class.Foo* [[THIS1]], i32 0, i32 0 -// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[X]], align 4 +// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[X]], align 4, !noundef [[NOUNDEF2]] // CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]] // CHECK-NEXT: ret i32 [[ADD]] // @@ -34,7 +34,7 @@ // CHECK-NEXT: store %class.Foo* [[THIS:%.*]], %class.Foo** [[THIS_ADDR]], align 8 // CHECK-NEXT: store i32 [[X:%.*]], i32* [[X_ADDR]], align 4 // CHECK-NEXT: [[THIS1:%.*]] = load %class.Foo*, %class.Foo** [[THIS_ADDR]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[X_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[X_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK-NEXT: call void @_ZN3FooC2Ei(%class.Foo* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK-NEXT: ret void // @@ -56,8 +56,8 @@ // CHECK-NEXT: store i32 [[ARG:%.*]], i32* [[ARG_ADDR]], align 4 // CHECK-NEXT: [[THIS1:%.*]] = load %class.Foo*, %class.Foo** [[THIS_ADDR]], align 8 // CHECK-NEXT: [[X:%.*]] = getelementptr inbounds [[CLASS_FOO:%.*]], %class.Foo* [[THIS1]], i32 0, i32 0 -// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[X]], align 4 -// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARG_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[X]], align 4, !noundef [[NOUNDEF2]] +// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARG_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP0]], [[TMP1]] // CHECK-NEXT: ret i32 [[SUB]] // diff --git a/clang/test/utils/update_cc_test_checks/Inputs/check-attributes.cpp.funcattrs.expected b/clang/test/utils/update_cc_test_checks/Inputs/check-attributes.cpp.funcattrs.expected --- a/clang/test/utils/update_cc_test_checks/Inputs/check-attributes.cpp.funcattrs.expected +++ b/clang/test/utils/update_cc_test_checks/Inputs/check-attributes.cpp.funcattrs.expected @@ -16,7 +16,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[S_ADDR:%.*]] = alloca %struct.ST*, align 8 // CHECK-NEXT: store %struct.ST* [[S:%.*]], %struct.ST** [[S_ADDR]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = load %struct.ST*, %struct.ST** [[S_ADDR]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load %struct.ST*, %struct.ST** [[S_ADDR]], align 8, !noundef [[NOUNDEF2:![0-9]+]] // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[TMP0]], i64 1 // CHECK-NEXT: [[Z:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* [[ARRAYIDX]], i32 0, i32 2 // CHECK-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_RT:%.*]], %struct.RT* [[Z]], i32 0, i32 1 diff --git a/clang/test/utils/update_cc_test_checks/Inputs/check-attributes.cpp.plain.expected b/clang/test/utils/update_cc_test_checks/Inputs/check-attributes.cpp.plain.expected --- a/clang/test/utils/update_cc_test_checks/Inputs/check-attributes.cpp.plain.expected +++ b/clang/test/utils/update_cc_test_checks/Inputs/check-attributes.cpp.plain.expected @@ -15,7 +15,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[S_ADDR:%.*]] = alloca %struct.ST*, align 8 // CHECK-NEXT: store %struct.ST* [[S:%.*]], %struct.ST** [[S_ADDR]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = load %struct.ST*, %struct.ST** [[S_ADDR]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load %struct.ST*, %struct.ST** [[S_ADDR]], align 8, !noundef [[NOUNDEF2:![0-9]+]] // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[TMP0]], i64 1 // CHECK-NEXT: [[Z:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.ST* 
[[ARRAYIDX]], i32 0, i32 2 // CHECK-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_RT:%.*]], %struct.RT* [[Z]], i32 0, i32 1 diff --git a/clang/test/utils/update_cc_test_checks/Inputs/def-and-decl.c.expected b/clang/test/utils/update_cc_test_checks/Inputs/def-and-decl.c.expected --- a/clang/test/utils/update_cc_test_checks/Inputs/def-and-decl.c.expected +++ b/clang/test/utils/update_cc_test_checks/Inputs/def-and-decl.c.expected @@ -23,7 +23,7 @@ // CHECK-NEXT: entry: // CHECK-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 // CHECK-NEXT: store i32 [[ARG:%.*]], i32* [[ARG_ADDR]], align 4 -// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARG_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARG_ADDR]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK-NEXT: ret i32 [[TMP0]] // int foo(int arg) { diff --git a/clang/test/utils/update_cc_test_checks/Inputs/explicit-template-instantiation.cpp.expected b/clang/test/utils/update_cc_test_checks/Inputs/explicit-template-instantiation.cpp.expected --- a/clang/test/utils/update_cc_test_checks/Inputs/explicit-template-instantiation.cpp.expected +++ b/clang/test/utils/update_cc_test_checks/Inputs/explicit-template-instantiation.cpp.expected @@ -43,7 +43,7 @@ // CHECK-NEXT: store %struct.Foo* [[THIS:%.*]], %struct.Foo** [[THIS_ADDR]], align 8 // CHECK-NEXT: store i8 [[X:%.*]], i8* [[X_ADDR]], align 1 // CHECK-NEXT: [[THIS1:%.*]] = load %struct.Foo*, %struct.Foo** [[THIS_ADDR]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = load i8, i8* [[X_ADDR]], align 1 +// CHECK-NEXT: [[TMP0:%.*]] = load i8, i8* [[X_ADDR]], align 1, !noundef [[NOUNDEF2:![0-9]+]] // CHECK-NEXT: call void @_ZN3FooIcEC2Ec(%struct.Foo* noundef nonnull align 1 dereferenceable(1) [[THIS1]], i8 noundef signext [[TMP0]]) // CHECK-NEXT: ret void // @@ -61,7 +61,7 @@ // CHECK-NEXT: store %struct.Foo* [[THIS:%.*]], %struct.Foo** [[THIS_ADDR]], align 8 // CHECK-NEXT: [[THIS1:%.*]] = load %struct.Foo*, %struct.Foo** [[THIS_ADDR]], align 8 // CHECK-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_FOO:%.*]], %struct.Foo* [[THIS1]], i32 0, i32 0 -// CHECK-NEXT: [[TMP0:%.*]] = load i8, i8* [[X]], align 1 +// CHECK-NEXT: [[TMP0:%.*]] = load i8, i8* [[X]], align 1, !noundef [[NOUNDEF2]] // CHECK-NEXT: ret i8 [[TMP0]] // // CHECK-LABEL: @_ZN3FooIcE3setEc( @@ -71,7 +71,7 @@ // CHECK-NEXT: store %struct.Foo* [[THIS:%.*]], %struct.Foo** [[THIS_ADDR]], align 8 // CHECK-NEXT: store i8 [[_X:%.*]], i8* [[_X_ADDR]], align 1 // CHECK-NEXT: [[THIS1:%.*]] = load %struct.Foo*, %struct.Foo** [[THIS_ADDR]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = load i8, i8* [[_X_ADDR]], align 1 +// CHECK-NEXT: [[TMP0:%.*]] = load i8, i8* [[_X_ADDR]], align 1, !noundef [[NOUNDEF2]] // CHECK-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_FOO:%.*]], %struct.Foo* [[THIS1]], i32 0, i32 0 // CHECK-NEXT: store i8 [[TMP0]], i8* [[X]], align 1 // CHECK-NEXT: ret void @@ -85,7 +85,7 @@ // CHECK-NEXT: store %struct.Foo.0* [[THIS:%.*]], %struct.Foo.0** [[THIS_ADDR]], align 8 // CHECK-NEXT: store i16 [[X:%.*]], i16* [[X_ADDR]], align 2 // CHECK-NEXT: [[THIS1:%.*]] = load %struct.Foo.0*, %struct.Foo.0** [[THIS_ADDR]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* [[X_ADDR]], align 2 +// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* [[X_ADDR]], align 2, !noundef [[NOUNDEF2]] // CHECK-NEXT: call void @_ZN3FooIsEC2Es(%struct.Foo.0* noundef nonnull align 2 dereferenceable(2) [[THIS1]], i16 noundef signext [[TMP0]]) // CHECK-NEXT: ret void // @@ -103,7 +103,7 @@ // CHECK-NEXT: store %struct.Foo.0* [[THIS:%.*]], %struct.Foo.0** [[THIS_ADDR]], 
align 8 // CHECK-NEXT: [[THIS1:%.*]] = load %struct.Foo.0*, %struct.Foo.0** [[THIS_ADDR]], align 8 // CHECK-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_FOO_0:%.*]], %struct.Foo.0* [[THIS1]], i32 0, i32 0 -// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* [[X]], align 2 +// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* [[X]], align 2, !noundef [[NOUNDEF2]] // CHECK-NEXT: ret i16 [[TMP0]] // // CHECK-LABEL: @_ZN3FooIsE3setEs( @@ -113,7 +113,7 @@ // CHECK-NEXT: store %struct.Foo.0* [[THIS:%.*]], %struct.Foo.0** [[THIS_ADDR]], align 8 // CHECK-NEXT: store i16 [[_X:%.*]], i16* [[_X_ADDR]], align 2 // CHECK-NEXT: [[THIS1:%.*]] = load %struct.Foo.0*, %struct.Foo.0** [[THIS_ADDR]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* [[_X_ADDR]], align 2 +// CHECK-NEXT: [[TMP0:%.*]] = load i16, i16* [[_X_ADDR]], align 2, !noundef [[NOUNDEF2]] // CHECK-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_FOO_0:%.*]], %struct.Foo.0* [[THIS1]], i32 0, i32 0 // CHECK-NEXT: store i16 [[TMP0]], i16* [[X]], align 2 // CHECK-NEXT: ret void @@ -130,7 +130,7 @@ // CHECK-NEXT: store %struct.Bar* [[THIS:%.*]], %struct.Bar** [[THIS_ADDR]], align 8 // CHECK-NEXT: store i32 [[X:%.*]], i32* [[X_ADDR]], align 4 // CHECK-NEXT: [[THIS1:%.*]] = load %struct.Bar*, %struct.Bar** [[THIS_ADDR]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[X_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[X_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK-NEXT: call void @_ZN3BarIiEC2Ei(%struct.Bar* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]) // CHECK-NEXT: ret void // @@ -159,7 +159,7 @@ // CHECK-NEXT: store i32 [[_X:%.*]], i32* [[_X_ADDR]], align 4 // CHECK-NEXT: [[THIS1:%.*]] = load %struct.Bar*, %struct.Bar** [[THIS_ADDR]], align 8 // CHECK-NEXT: [[FOO:%.*]] = getelementptr inbounds [[STRUCT_BAR:%.*]], %struct.Bar* [[THIS1]], i32 0, i32 0 -// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[_X_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[_X_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK-NEXT: call void @_ZN3FooIiE3setEi(%struct.Foo.1* noundef nonnull align 4 dereferenceable(4) [[FOO]], i32 noundef [[TMP0]]) // CHECK-NEXT: ret void // @@ -175,7 +175,7 @@ // CHECK-NEXT: store %struct.Baz* [[THIS:%.*]], %struct.Baz** [[THIS_ADDR]], align 8 // CHECK-NEXT: store i64 [[X:%.*]], i64* [[X_ADDR]], align 8 // CHECK-NEXT: [[THIS1:%.*]] = load %struct.Baz*, %struct.Baz** [[THIS_ADDR]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[X_ADDR]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[X_ADDR]], align 8, !noundef [[NOUNDEF2]] // CHECK-NEXT: call void @_ZN3BazIlEC2El(%struct.Baz* noundef nonnull align 8 dereferenceable(8) [[THIS1]], i64 noundef [[TMP0]]) // CHECK-NEXT: ret void // diff --git a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs-regex.c.expected b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs-regex.c.expected --- a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs-regex.c.expected +++ b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs-regex.c.expected @@ -18,10 +18,10 @@ // CHECK-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK-NEXT: %{{somevar_[a-z0-9]+_}} = alloca i32, align 4 // CHECK-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4 -// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // CHECK-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK-NEXT: store i32 
[[TMP0]], i32* [[CONV]], align 4 -// CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 +// CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8, !noundef [[NOUNDEF3]] // CHECK-NEXT: call void @{{__omp_offloading_[a-z0-9]+_[a-z0-9]+_foo_l[0-9]+}}(i64 [[TMP1]]) #[[ATTR3:[0-9]+]] // CHECK-NEXT: call void @{{__test_offloading_[a-z0-9]+_[a-z0-9]+_bar_l[0-9]+}}() // CHECK-NEXT: ret void @@ -32,7 +32,7 @@ // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8 // CHECK-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* -// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4, !noundef [[NOUNDEF3]] // CHECK-NEXT: call void @use(i32 noundef [[TMP0]]) // CHECK-NEXT: ret void // diff --git a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.generated.expected b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.generated.expected --- a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.generated.expected +++ b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.generated.expected @@ -59,33 +59,33 @@ // OMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // OMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // OMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 -// OMP-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 +// OMP-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4, !noundef [[NOUNDEF3:![0-9]+]] // OMP-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) -// OMP-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 +// OMP-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 33554431 // OMP-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // OMP: cond.true: // OMP-NEXT: br label [[COND_END:%.*]] // OMP: cond.false: -// OMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 +// OMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: br label [[COND_END]] // OMP: cond.end: // OMP-NEXT: [[COND:%.*]] = phi i32 [ 33554431, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // OMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 -// OMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 +// OMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // OMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // OMP: omp.inner.for.cond: -// OMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 -// OMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 +// OMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// OMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // OMP-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // OMP: omp.inner.for.body: -// OMP-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 +// OMP-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // OMP-NEXT: [[ADD:%.*]] = add nsw i32 0, 
[[MUL]] // OMP-NEXT: store i32 [[ADD]], i32* [[I]], align 4 -// OMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[I]], align 4 +// OMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[I]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP8]] to i64 // OMP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [33554432 x double], [33554432 x double]* @A, i64 0, i64 [[IDXPROM]] // OMP-NEXT: store double 0.000000e+00, double* [[ARRAYIDX]], align 8 @@ -93,7 +93,7 @@ // OMP: omp.body.continue: // OMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // OMP: omp.inner.for.inc: -// OMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 +// OMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // OMP-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4 // OMP-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -130,33 +130,33 @@ // OMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // OMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // OMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 -// OMP-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 +// OMP-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) -// OMP-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 +// OMP-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 33554431 // OMP-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // OMP: cond.true: // OMP-NEXT: br label [[COND_END:%.*]] // OMP: cond.false: -// OMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 +// OMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: br label [[COND_END]] // OMP: cond.end: // OMP-NEXT: [[COND:%.*]] = phi i32 [ 33554431, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // OMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 -// OMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 +// OMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // OMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // OMP: omp.inner.for.cond: -// OMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 -// OMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 +// OMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] +// OMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // OMP-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // OMP: omp.inner.for.body: -// OMP-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 +// OMP-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // OMP-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] // OMP-NEXT: store i32 [[ADD]], i32* [[I]], align 4 -// OMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[I]], align 4 +// OMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[I]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP8]] to i64 // OMP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [33554432 
x double], [33554432 x double]* @A, i64 0, i64 [[IDXPROM]] // OMP-NEXT: store double 1.000000e+00, double* [[ARRAYIDX]], align 8 @@ -164,7 +164,7 @@ // OMP: omp.body.continue: // OMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // OMP: omp.inner.for.inc: -// OMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 +// OMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !noundef [[NOUNDEF3]] // OMP-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // OMP-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4 // OMP-NEXT: br label [[OMP_INNER_FOR_COND]] @@ -184,20 +184,20 @@ // NOOMP-NEXT: store i32 0, i32* [[I]], align 4 // NOOMP-NEXT: br label [[FOR_COND:%.*]] // NOOMP: for.cond: -// NOOMP-NEXT: [[TMP0:%.*]] = load i32, i32* [[I]], align 4 +// NOOMP-NEXT: [[TMP0:%.*]] = load i32, i32* [[I]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // NOOMP-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 33554432 // NOOMP-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] // NOOMP: for.body: -// NOOMP-NEXT: [[TMP1:%.*]] = load i32, i32* [[I]], align 4 +// NOOMP-NEXT: [[TMP1:%.*]] = load i32, i32* [[I]], align 4, !noundef [[NOUNDEF2]] // NOOMP-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64 // NOOMP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [33554432 x double], [33554432 x double]* @A, i64 0, i64 [[IDXPROM]] // NOOMP-NEXT: store double 0.000000e+00, double* [[ARRAYIDX]], align 8 // NOOMP-NEXT: br label [[FOR_INC:%.*]] // NOOMP: for.inc: -// NOOMP-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4 +// NOOMP-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4, !noundef [[NOUNDEF2]] // NOOMP-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1 // NOOMP-NEXT: store i32 [[INC]], i32* [[I]], align 4 -// NOOMP-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]] +// NOOMP-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] // NOOMP: for.end: // NOOMP-NEXT: call void @foo() // NOOMP-NEXT: ret i32 0 @@ -210,20 +210,20 @@ // NOOMP-NEXT: store i32 0, i32* [[I]], align 4 // NOOMP-NEXT: br label [[FOR_COND:%.*]] // NOOMP: for.cond: -// NOOMP-NEXT: [[TMP0:%.*]] = load i32, i32* [[I]], align 4 +// NOOMP-NEXT: [[TMP0:%.*]] = load i32, i32* [[I]], align 4, !noundef [[NOUNDEF2]] // NOOMP-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 33554432 // NOOMP-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] // NOOMP: for.body: -// NOOMP-NEXT: [[TMP1:%.*]] = load i32, i32* [[I]], align 4 +// NOOMP-NEXT: [[TMP1:%.*]] = load i32, i32* [[I]], align 4, !noundef [[NOUNDEF2]] // NOOMP-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64 // NOOMP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [33554432 x double], [33554432 x double]* @A, i64 0, i64 [[IDXPROM]] // NOOMP-NEXT: store double 1.000000e+00, double* [[ARRAYIDX]], align 8 // NOOMP-NEXT: br label [[FOR_INC:%.*]] // NOOMP: for.inc: -// NOOMP-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4 +// NOOMP-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4, !noundef [[NOUNDEF2]] // NOOMP-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1 // NOOMP-NEXT: store i32 [[INC]], i32* [[I]], align 4 -// NOOMP-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// NOOMP-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // NOOMP: for.end: // NOOMP-NEXT: ret void // diff --git a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.no-generated.expected b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.no-generated.expected --- a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.no-generated.expected +++ 
b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.no-generated.expected @@ -28,20 +28,20 @@ // NOOMP-NEXT: store i32 0, i32* [[I]], align 4 // NOOMP-NEXT: br label [[FOR_COND:%.*]] // NOOMP: for.cond: -// NOOMP-NEXT: [[TMP0:%.*]] = load i32, i32* [[I]], align 4 +// NOOMP-NEXT: [[TMP0:%.*]] = load i32, i32* [[I]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // NOOMP-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 33554432 // NOOMP-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] // NOOMP: for.body: -// NOOMP-NEXT: [[TMP1:%.*]] = load i32, i32* [[I]], align 4 +// NOOMP-NEXT: [[TMP1:%.*]] = load i32, i32* [[I]], align 4, !noundef [[NOUNDEF2]] // NOOMP-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64 // NOOMP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [33554432 x double], [33554432 x double]* @A, i64 0, i64 [[IDXPROM]] // NOOMP-NEXT: store double 0.000000e+00, double* [[ARRAYIDX]], align 8 // NOOMP-NEXT: br label [[FOR_INC:%.*]] // NOOMP: for.inc: -// NOOMP-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4 +// NOOMP-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4, !noundef [[NOUNDEF2]] // NOOMP-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1 // NOOMP-NEXT: store i32 [[INC]], i32* [[I]], align 4 -// NOOMP-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]] +// NOOMP-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] // NOOMP: for.end: // NOOMP-NEXT: call void @foo() // NOOMP-NEXT: ret i32 0 @@ -73,20 +73,20 @@ // NOOMP-NEXT: store i32 0, i32* [[I]], align 4 // NOOMP-NEXT: br label [[FOR_COND:%.*]] // NOOMP: for.cond: -// NOOMP-NEXT: [[TMP0:%.*]] = load i32, i32* [[I]], align 4 +// NOOMP-NEXT: [[TMP0:%.*]] = load i32, i32* [[I]], align 4, !noundef [[NOUNDEF2]] // NOOMP-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 33554432 // NOOMP-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] // NOOMP: for.body: -// NOOMP-NEXT: [[TMP1:%.*]] = load i32, i32* [[I]], align 4 +// NOOMP-NEXT: [[TMP1:%.*]] = load i32, i32* [[I]], align 4, !noundef [[NOUNDEF2]] // NOOMP-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64 // NOOMP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [33554432 x double], [33554432 x double]* @A, i64 0, i64 [[IDXPROM]] // NOOMP-NEXT: store double 1.000000e+00, double* [[ARRAYIDX]], align 8 // NOOMP-NEXT: br label [[FOR_INC:%.*]] // NOOMP: for.inc: -// NOOMP-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4 +// NOOMP-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4, !noundef [[NOUNDEF2]] // NOOMP-NEXT: [[INC:%.*]] = add nsw i32 [[TMP2]], 1 // NOOMP-NEXT: store i32 [[INC]], i32* [[I]], align 4 -// NOOMP-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] +// NOOMP-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] // NOOMP: for.end: // NOOMP-NEXT: ret void // diff --git a/clang/test/utils/update_cc_test_checks/Inputs/mangled_names.c.expected b/clang/test/utils/update_cc_test_checks/Inputs/mangled_names.c.expected --- a/clang/test/utils/update_cc_test_checks/Inputs/mangled_names.c.expected +++ b/clang/test/utils/update_cc_test_checks/Inputs/mangled_names.c.expected @@ -8,8 +8,8 @@ // CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 // CHECK-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8 // CHECK-NEXT: store i32 [[B:%.*]], i32* [[B_ADDR]], align 4 -// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8, !noundef [[NOUNDEF2:![0-9]+]] +// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* 
[[B_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP1]] to i64 // CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP0]], [[CONV]] // CHECK-NEXT: ret i64 [[ADD]] @@ -27,11 +27,11 @@ // CHECK-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8 // CHECK-NEXT: store i32 [[B:%.*]], i32* [[B_ADDR]], align 4 // CHECK-NEXT: store i32 [[C:%.*]], i32* [[C_ADDR]], align 4 -// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP1]] to i64 // CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP0]], [[CONV]] -// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[C_ADDR]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[C_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK-NEXT: [[CONV1:%.*]] = sext i32 [[TMP2]] to i64 // CHECK-NEXT: [[ADD2:%.*]] = add nsw i64 [[ADD]], [[CONV1]] // CHECK-NEXT: ret i64 [[ADD2]] diff --git a/clang/test/utils/update_cc_test_checks/Inputs/mangled_names.c.funcsig.expected b/clang/test/utils/update_cc_test_checks/Inputs/mangled_names.c.funcsig.expected --- a/clang/test/utils/update_cc_test_checks/Inputs/mangled_names.c.funcsig.expected +++ b/clang/test/utils/update_cc_test_checks/Inputs/mangled_names.c.funcsig.expected @@ -9,8 +9,8 @@ // CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 // CHECK-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 -// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8, !noundef [[NOUNDEF2:![0-9]+]] +// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP1]] to i64 // CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP0]], [[CONV]] // CHECK-NEXT: ret i64 [[ADD]] @@ -29,11 +29,11 @@ // CHECK-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 // CHECK-NEXT: store i32 [[C]], i32* [[C_ADDR]], align 4 -// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8, !noundef [[NOUNDEF2]] +// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[B_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP1]] to i64 // CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP0]], [[CONV]] -// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[C_ADDR]], align 4 +// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[C_ADDR]], align 4, !noundef [[NOUNDEF2]] // CHECK-NEXT: [[CONV1:%.*]] = sext i32 [[TMP2]] to i64 // CHECK-NEXT: [[ADD2:%.*]] = add nsw i64 [[ADD]], [[CONV1]] // CHECK-NEXT: ret i64 [[ADD2]] diff --git a/clang/test/utils/update_cc_test_checks/Inputs/resolve-tmp-conflict.cpp.expected b/clang/test/utils/update_cc_test_checks/Inputs/resolve-tmp-conflict.cpp.expected --- a/clang/test/utils/update_cc_test_checks/Inputs/resolve-tmp-conflict.cpp.expected +++ b/clang/test/utils/update_cc_test_checks/Inputs/resolve-tmp-conflict.cpp.expected @@ -12,7 +12,7 @@ // CHECK-NEXT: store i32* [[A_ADDR]], i32** [[_TMP0]], align 8 // CHECK-NEXT: store i32 1, i32* [[REF_TMP]], align 4 // CHECK-NEXT: store i32* [[REF_TMP]], 
i32** [[_TMP1]], align 8 -// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 +// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4, !noundef [[NOUNDEF2:![0-9]+]] // CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[_TMP1]], align 8 // CHECK-NEXT: store i32 [[TMP0]], i32* [[TMP1]], align 4 // CHECK-NEXT: ret void